
Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


trunk/escript/src/DataLazy.cpp revision 2514 by jfenwick, Fri Jul 3 00:57:45 2009 UTC
trunk/escriptcore/src/DataLazy.cpp revision 6114 by jfenwick, Fri Apr 1 03:37:52 2016 UTC
# Line 1  Line 1 
1    
2  /*******************************************************  /*****************************************************************************
3  *  *
4  * Copyright (c) 2003-2008 by University of Queensland  * Copyright (c) 2003-2016 by The University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * http://www.uq.edu.au
 * http://www.uq.edu.au/esscc  
6  *  *
7  * Primary Business: Queensland, Australia  * Primary Business: Queensland, Australia
8  * Licensed under the Open Software License version 3.0  * Licensed under the Apache License, version 2.0
9  * http://www.opensource.org/licenses/osl-3.0.php  * http://www.apache.org/licenses/LICENSE-2.0
10  *  *
11  *******************************************************/  * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12    * Development 2012-2013 by School of Earth Sciences
13    * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14    *
15    *****************************************************************************/
16    
17  #include "DataLazy.h"  #include "DataLazy.h"
18    #include "Data.h"
19    #include "DataTypes.h"
20    #include "EscriptParams.h"
21    #include "FunctionSpace.h"
22    #include "Utils.h"
23    #include "DataVectorOps.h"
24    
25  #ifdef USE_NETCDF  #ifdef USE_NETCDF
26  #include <netcdfcpp.h>  #include <netcdfcpp.h>
27  #endif  #endif
 #ifdef PASO_MPI  
 #include <mpi.h>  
 #endif  
 #ifdef _OPENMP  
 #include <omp.h>  
 #endif  
 #include "FunctionSpace.h"  
 #include "DataTypes.h"  
 #include "Data.h"  
 #include "UnaryFuncs.h"     // for escript::fsign  
 #include "Utils.h"  
28    
29  #include "EscriptParams.h"  #include <iomanip> // for some fancy formatting in debug
30    
31    using namespace escript::DataTypes;
32    
33  #include <iomanip>      // for some fancy formatting in debug  #define NO_ARG
34    
35  // #define LAZYDEBUG(X) if (privdebug){X;}  // #define LAZYDEBUG(X) if (privdebug){X;}
36  #define LAZYDEBUG(X)  #define LAZYDEBUG(X)
# Line 44  bool privdebug=false; Line 44  bool privdebug=false;
44    
45  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}
46    
47  #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {resolveToIdentity();}  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "SIZE LIMIT EXCEEDED " << m_height << endl;resolveToIdentity();}
48    
49    
50    #define SIZELIMIT if (m_height>escript::escriptParams.getTOO_MANY_LEVELS())  {if (escript::escriptParams.getLAZY_VERBOSE()){cerr << "SIZE LIMIT EXCEEDED height=" << m_height << endl;}resolveToIdentity();}
51    
52  /*  /*
53  How does DataLazy work?  How does DataLazy work?
54  ~~~~~~~~~~~~~~~~~~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~
# Line 58  A special operation, IDENTITY, stores an Line 60  A special operation, IDENTITY, stores an
60  This means that all "internal" nodes in the structure are instances of DataLazy.  This means that all "internal" nodes in the structure are instances of DataLazy.
61    
62  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
63  Note that IDENITY is not considered a unary operation.  Note that IDENTITY is not considered a unary operation.
64    
65  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
66  It must however form a DAG (directed acyclic graph).  It must however form a DAG (directed acyclic graph).
# Line 66  I will refer to individual DataLazy obje Line 68  I will refer to individual DataLazy obje
68    
69  Each node also stores:  Each node also stores:
70  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
71      evaluated.          evaluated.
72  - m_buffsrequired ~ the larged number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the large number of samples which would need to be kept simultaneously in order to
73      evaluate the expression.          evaluate the expression.
74  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
75    
76  When a new node is created, the above values are computed based on the values in the child nodes.  When a new node is created, the above values are computed based on the values in the child nodes.
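As an illustration of the structure described above, here is a small self-contained sketch (names Node, readytype and height are simplified stand-ins for DataLazy, m_readytype and m_height; this is not the escript API, only an editorial example of the DAG bookkeeping):

#include <memory>
#include <algorithm>

struct Node
{
    char readytype;                      // 'E' expanded, 'T' tagged, 'C' constant
    std::shared_ptr<Node> left, right;   // children may be shared, so this forms a DAG
    int height;

    explicit Node(char rt) : readytype(rt), height(0) {}   // leaf (identity) node

    Node(std::shared_ptr<Node> l, std::shared_ptr<Node> r) : left(l), right(r)
    {
        // readytype is promoted towards 'E', mirroring the binary constructor below
        if (l->readytype=='E' || r->readytype=='E')      readytype='E';
        else if (l->readytype=='T' || r->readytype=='T') readytype='T';
        else                                             readytype='C';
        height = std::max(l->height, r->height) + 1;
    }
};

// c = a + a reuses the same child twice, so the structure is a DAG rather than a tree:
//   auto a = std::make_shared<Node>('E');
//   auto c = std::make_shared<Node>(a, a);   // c->readytype == 'E', c->height == 1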
# Line 109  namespace escript Line 111  namespace escript
111  namespace  namespace
112  {  {
113    
114    
115    // enabling this will print out when ever the maximum stacksize used by resolve increases
116    // it assumes _OPENMP is also in use
117    //#define LAZY_STACK_PROF
118    
119    
120    
121    #ifndef _OPENMP
122      #ifdef LAZY_STACK_PROF
123      #undef LAZY_STACK_PROF
124      #endif
125    #endif
126    
127    
128    #ifdef LAZY_STACK_PROF
129    std::vector<void*> stackstart(getNumberOfThreads());
130    std::vector<void*> stackend(getNumberOfThreads());
131    size_t maxstackuse=0;
132    #endif
133    
134  enum ES_opgroup  enum ES_opgroup
135  {  {
136     G_UNKNOWN,     G_UNKNOWN,
137     G_IDENTITY,     G_IDENTITY,
138     G_BINARY,        // pointwise operations with two arguments     G_BINARY,            // pointwise operations with two arguments
139     G_UNARY,     // pointwise operations with one argument     G_UNARY,             // pointwise operations with one argument
140     G_UNARY_P,       // pointwise operations with one argument, requiring a parameter     G_UNARY_P,           // pointwise operations with one argument, requiring a parameter
141     G_NP1OUT,        // non-pointwise op with one output     G_NP1OUT,            // non-pointwise op with one output
142     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter     G_NP1OUT_P,          // non-pointwise op with one output requiring a parameter
143     G_TENSORPROD,    // general tensor product     G_TENSORPROD,        // general tensor product
144     G_NP1OUT_2P      // non-pointwise op with one output requiring two params     G_NP1OUT_2P,         // non-pointwise op with one output requiring two params
145       G_REDUCTION,         // non-pointwise unary op with a scalar output
146       G_CONDEVAL
147  };  };
148    
149    
150    
151    
152  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",
153              "sin","cos","tan",                          "sin","cos","tan",
154              "asin","acos","atan","sinh","cosh","tanh","erf",                          "asin","acos","atan","sinh","cosh","tanh","erf",
155              "asinh","acosh","atanh",                          "asinh","acosh","atanh",
156              "log10","log","sign","abs","neg","pos","exp","sqrt",                          "log10","log","sign","abs","neg","pos","exp","sqrt",
157              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",                          "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",
158              "symmetric","nonsymmetric",                          "symmetric","antisymmetric",
159              "prod",                          "prod",
160              "transpose", "trace",                          "transpose", "trace",
161              "swapaxes"};                          "swapaxes",
162  int ES_opcount=41;                          "minval", "maxval",
163                            "condEval"};
164    int ES_opcount=44;
165  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
166              G_UNARY,G_UNARY,G_UNARY, //10                          G_UNARY,G_UNARY,G_UNARY, //10
167              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 17
168              G_UNARY,G_UNARY,G_UNARY,                    // 20                          G_UNARY,G_UNARY,G_UNARY,                                        // 20
169              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 28                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28
170              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,          // 35
171              G_NP1OUT,G_NP1OUT,                          G_NP1OUT,G_NP1OUT,
172              G_TENSORPROD,                          G_TENSORPROD,
173              G_NP1OUT_P, G_NP1OUT_P,                          G_NP1OUT_P, G_NP1OUT_P,
174              G_NP1OUT_2P};                          G_NP1OUT_2P,
175                            G_REDUCTION, G_REDUCTION,
176                            G_CONDEVAL};
177  inline  inline
178  ES_opgroup  ES_opgroup
179  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 157  getOpgroup(ES_optype op) Line 185  getOpgroup(ES_optype op)
185  FunctionSpace  FunctionSpace
186  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
187  {  {
188      // perhaps this should call interpolate and throw or something?          // perhaps this should call interpolate and throw or something?
189      // maybe we need an interpolate node -          // maybe we need an interpolate node -
190      // that way, if interpolate is required in any other op we can just throw a          // that way, if interpolate is required in any other op we can just throw a
191      // programming error exception.          // programming error exception.
192    
193    FunctionSpace l=left->getFunctionSpace();    FunctionSpace l=left->getFunctionSpace();
194    FunctionSpace r=right->getFunctionSpace();    FunctionSpace r=right->getFunctionSpace();
195    if (l!=r)    if (l!=r)
196    {    {
197      if (r.probeInterpolation(l))      signed char res=r.getDomain()->preferredInterpolationOnDomain(r.getTypeCode(), l.getTypeCode());
198        if (res==1)
199      {      {
200      return l;          return l;
201      }      }
202      if (l.probeInterpolation(r))      if (res==-1)
203      {      {
204      return r;          return r;
205      }      }
206      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
207    }    }
# Line 184  resultFS(DataAbstract_ptr left, DataAbst Line 213  resultFS(DataAbstract_ptr left, DataAbst
213  DataTypes::ShapeType  DataTypes::ShapeType
214  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
215  {  {
216      if (left->getShape()!=right->getShape())          if (left->getShape()!=right->getShape())
217      {          {
218        if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))            if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
219        {            {
220          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");                  throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
221        }            }
222        if (left->getRank()==0)   // we need to allow scalar * anything  
223        {            if (left->getRank()==0)       // we need to allow scalar * anything
224          return right->getShape();            {
225        }                  return right->getShape();
226        if (right->getRank()==0)            }
227        {            if (right->getRank()==0)
228          return left->getShape();            {
229        }                  return left->getShape();
230        throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");            }
231      }            throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
232      return left->getShape();          }
233            return left->getShape();
234  }  }
235    
236  // return the shape for "op left"  // return the shape for "op left"
# Line 208  resultShape(DataAbstract_ptr left, DataA Line 238  resultShape(DataAbstract_ptr left, DataA
238  DataTypes::ShapeType  DataTypes::ShapeType
239  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)
240  {  {
241      switch(op)          switch(op)
242      {          {
243          case TRANS:          case TRANS:
244         {            // for the scoping of variables             {                    // for the scoping of variables
245          const DataTypes::ShapeType& s=left->getShape();                  const DataTypes::ShapeType& s=left->getShape();
246          DataTypes::ShapeType sh;                  DataTypes::ShapeType sh;
247          int rank=left->getRank();                  int rank=left->getRank();
248          if (axis_offset<0 || axis_offset>rank)                  if (axis_offset<0 || axis_offset>rank)
249          {                  {
250                 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);              stringstream e;
251              }              e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
252              for (int i=0; i<rank; i++)              throw DataException(e.str());
253          {          }
254             int index = (axis_offset+i)%rank;          for (int i=0; i<rank; i++)
255                 sh.push_back(s[index]); // Append to new shape                  {
256              }                     int index = (axis_offset+i)%rank;
257          return sh;             sh.push_back(s[index]); // Append to new shape
258         }          }
259      break;                  return sh;
260      case TRACE:             }
261         {          break;
262          int rank=left->getRank();          case TRACE:
263          if (rank<2)             {
264          {                  int rank=left->getRank();
265             throw DataException("Trace can only be computed for objects with rank 2 or greater.");                  if (rank<2)
266          }                  {
267          if ((axis_offset>rank-2) || (axis_offset<0))                     throw DataException("Trace can only be computed for objects with rank 2 or greater.");
268          {                  }
269             throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");                  if ((axis_offset>rank-2) || (axis_offset<0))
270          }                  {
271          if (rank==2)                     throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");
272          {                  }
273             return DataTypes::scalarShape;                  if (rank==2)
274          }                  {
275          else if (rank==3)                     return DataTypes::scalarShape;
276          {                  }
277             DataTypes::ShapeType sh;                  else if (rank==3)
278                 if (axis_offset==0)                  {
279             {                     DataTypes::ShapeType sh;
280                  sh.push_back(left->getShape()[2]);                     if (axis_offset==0)
281                 }                     {
282                 else     // offset==1                          sh.push_back(left->getShape()[2]);
283             {                     }
284              sh.push_back(left->getShape()[0]);                     else         // offset==1
285                 }                     {
286             return sh;                          sh.push_back(left->getShape()[0]);
287          }                     }
288          else if (rank==4)                     return sh;
289          {                  }
290             DataTypes::ShapeType sh;                  else if (rank==4)
291             const DataTypes::ShapeType& s=left->getShape();                  {
292                 if (axis_offset==0)                     DataTypes::ShapeType sh;
293             {                     const DataTypes::ShapeType& s=left->getShape();
294                  sh.push_back(s[2]);                     if (axis_offset==0)
295                  sh.push_back(s[3]);                     {
296                 }                          sh.push_back(s[2]);
297                 else if (axis_offset==1)                          sh.push_back(s[3]);
298             {                     }
299                  sh.push_back(s[0]);                     else if (axis_offset==1)
300                  sh.push_back(s[3]);                     {
301                 }                          sh.push_back(s[0]);
302             else     // offset==2                          sh.push_back(s[3]);
303             {                     }
304              sh.push_back(s[0]);                     else         // offset==2
305              sh.push_back(s[1]);                     {
306             }                          sh.push_back(s[0]);
307             return sh;                          sh.push_back(s[1]);
308          }                     }
309          else        // unknown rank                     return sh;
310          {                  }
311             throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");                  else            // unknown rank
312          }                  {
313         }                     throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
314      break;                  }
315          default:             }
316      throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");          break;
317      }          default:
318            throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
319            }
320  }  }
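To make the TRANS branch concrete, the helper below simply repeats the rotation rule used above on an arbitrarily chosen shape (an illustrative sketch, not part of the source):

#include <vector>

// Mirrors the axis rotation performed in the TRANS branch of resultShape.
std::vector<int> rotateShape(const std::vector<int>& s, int axis_offset)
{
    std::vector<int> sh;
    int rank = static_cast<int>(s.size());
    for (int i = 0; i < rank; ++i)
        sh.push_back(s[(axis_offset + i) % rank]);   // same indexing as above
    return sh;
}

// rotateShape({2,3,5}, 1) == {3,5,2}; an axis_offset of 0 (or rank) returns the shape unchanged.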
321    
322  DataTypes::ShapeType  DataTypes::ShapeType
# Line 302  SwapShape(DataAbstract_ptr left, const i Line 334  SwapShape(DataAbstract_ptr left, const i
334          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
335       }       }
336       if (axis0<0 || axis0>rank-1) {       if (axis0<0 || axis0>rank-1) {
337          throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);          stringstream e;
338            e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
339            throw DataException(e.str());
340       }       }
341       if (axis1<0 || axis1>rank-1) {       if (axis1<0 || axis1>rank-1) {
342           throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);          stringstream e;
343            e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
344            throw DataException(e.str());
345       }       }
346       if (axis0 == axis1) {       if (axis0 == axis1) {
347           throw DataException("Error - Data::swapaxes: axis indices must be different.");           throw DataException("Error - Data::swapaxes: axis indices must be different.");
# Line 336  SwapShape(DataAbstract_ptr left, const i Line 372  SwapShape(DataAbstract_ptr left, const i
372  DataTypes::ShapeType  DataTypes::ShapeType
373  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
374  {  {
375                
376    // Get rank and shape of inputs    // Get rank and shape of inputs
377    int rank0 = left->getRank();    int rank0 = left->getRank();
378    int rank1 = right->getRank();    int rank1 = right->getRank();
# Line 345  GTPShape(DataAbstract_ptr left, DataAbst Line 381  GTPShape(DataAbstract_ptr left, DataAbst
381    
382    // Prepare for the loops of the product and verify compatibility of shapes    // Prepare for the loops of the product and verify compatibility of shapes
383    int start0=0, start1=0;    int start0=0, start1=0;
384    if (transpose == 0)       {}    if (transpose == 0)           {}
385    else if (transpose == 1)  { start0 = axis_offset; }    else if (transpose == 1)      { start0 = axis_offset; }
386    else if (transpose == 2)  { start1 = rank1-axis_offset; }    else if (transpose == 2)      { start1 = rank1-axis_offset; }
387    else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }    else                          { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
388    
389    if (rank0<axis_offset)    if (rank0<axis_offset)
390    {    {
391      throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");          throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
392    }    }
393    
394    // Adjust the shapes for transpose    // Adjust the shapes for transpose
395    DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather    DataTypes::ShapeType tmpShape0(rank0);        // pre-sizing the vectors rather
396    DataTypes::ShapeType tmpShape1(rank1);    // than using push_back    DataTypes::ShapeType tmpShape1(rank1);        // than using push_back
397    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
398    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }
399    
400    // Prepare for the loops of the product    // Prepare for the loops of the product
401    SL=1, SM=1, SR=1;    SL=1, SM=1, SR=1;
402    for (int i=0; i<rank0-axis_offset; i++)   {    for (int i=0; i<rank0-axis_offset; i++)       {
403      SL *= tmpShape0[i];      SL *= tmpShape0[i];
404    }    }
405    for (int i=rank0-axis_offset; i<rank0; i++)   {    for (int i=rank0-axis_offset; i<rank0; i++)   {
406      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
407        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
408      }      }
409      SM *= tmpShape0[i];      SM *= tmpShape0[i];
410    }    }
411    for (int i=axis_offset; i<rank1; i++)     {    for (int i=axis_offset; i<rank1; i++)         {
412      SR *= tmpShape1[i];      SR *= tmpShape1[i];
413    }    }
414    
415    // Define the shape of the output (rank of shape is the sum of the loop ranges below)    // Define the shape of the output (rank of shape is the sum of the loop ranges below)
416    DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      
417    {         // block to limit the scope of out_index    {                     // block to limit the scope of out_index
418       int out_index=0;       int out_index=0;
419       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
420       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
# Line 394  GTPShape(DataAbstract_ptr left, DataAbst Line 430  GTPShape(DataAbstract_ptr left, DataAbst
430    return shape2;    return shape2;
431  }  }
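A hand-worked case of the shape bookkeeping above (shapes chosen arbitrarily for illustration, not taken from the source):

// GeneralTensorProduct of a (2,3) argument with a (3,4) argument, axis_offset=1, transpose=0:
//   tmpShape0 = (2,3), tmpShape1 = (3,4)
//   SL = 2        product of the leading rank0-axis_offset axes of the left argument
//   SM = 3        product of the summed axes; tmpShape0[1] must equal tmpShape1[0]
//   SR = 4        product of the trailing rank1-axis_offset axes of the right argument
//   shape2 = (2,4), i.e. rank 2+2-2*1 -- an ordinary matrix-matrix product.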
432    
433  // determine the number of samples requires to evaluate an expression combining left and right  }       // end anonymous namespace
 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  
 // The same goes for G_TENSORPROD  
 // It might seem that pointwise binary ops (G_BINARY) could be written over the top of the lefts.  
 // This would be true were it not for the possibility that the LHS could be a scalar which needs to be examined  
 // multiple times  
 int  
 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  
 {  
    switch(getOpgroup(op))  
    {  
    case G_IDENTITY: return 1;  
    case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_UNARY:  
    case G_UNARY_P: return max(left->getBuffsRequired(),1);  
    case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);  
    case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);  
    case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_NP1OUT_2P: return 1+max(left->getBuffsRequired(),1);  
    default:  
     throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");  
    }  
 }  
   
   
 }   // end anonymous namespace  
434    
435    
436    
# Line 434  opToString(ES_optype op) Line 445  opToString(ES_optype op)
445    return ES_opstrings[op];    return ES_opstrings[op];
446  }  }
447    
 #ifdef LAZY_NODE_STORAGE  
448  void DataLazy::LazyNodeSetup()  void DataLazy::LazyNodeSetup()
449  {  {
450  #ifdef _OPENMP  #ifdef _OPENMP
# Line 451  void DataLazy::LazyNodeSetup() Line 461  void DataLazy::LazyNodeSetup()
461      m_sampleids[0]=-1;      m_sampleids[0]=-1;
462  #endif  // _OPENMP  #endif  // _OPENMP
463  }  }
 #endif   // LAZY_NODE_STORAGE  
464    
465    
466  // Creates an identity node  // Creates an identity node
467  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
468      : parent(p->getFunctionSpace(),p->getShape())          : parent(p->getFunctionSpace(),p->getShape())
469  #ifdef LAZY_NODE_STORAGE          ,m_sampleids(0),
470      ,m_sampleids(0),          m_samples(1)
     m_samples(1)  
 #endif  
471  {  {
472     if (p->isLazy())     if (p->isLazy())
473     {     {
474      // I don't want identity of Lazy.          // I don't want identity of Lazy.
475      // Question: Why would that be so bad?          // Question: Why would that be so bad?
476      // Answer: We assume that the child of ID is something we can call getVector on          // Answer: We assume that the child of ID is something we can call getVector on
477      throw DataException("Programmer error - attempt to create identity from a DataLazy.");          throw DataException("Programmer error - attempt to create identity from a DataLazy.");
478     }     }
479     else     else
480     {     {
481      p->makeLazyShared();          DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);
482      DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);          makeIdentity(dr);
     makeIdentity(dr);  
483  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)
484     }     }
485  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
486  }  }
487    
488  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
489      : parent(left->getFunctionSpace(),left->getShape()),          : parent(left->getFunctionSpace(),(getOpgroup(op)!=G_REDUCTION)?left->getShape():DataTypes::scalarShape),
490      m_op(op),          m_op(op),
491      m_axis_offset(0),          m_axis_offset(0),
492      m_transpose(0),          m_transpose(0),
493      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
494  {  {
495     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT) && (getOpgroup(op)!=G_REDUCTION))
496     {     {
497      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
498     }     }
499    
500     DataLazy_ptr lleft;     DataLazy_ptr lleft;
501     if (!left->isLazy())     if (!left->isLazy())
502     {     {
503      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
504     }     }
505     else     else
506     {     {
507      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
508     }     }
509     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
510     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
511     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
512     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
513     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
514     LazyNodeSetup();     LazyNodeSetup();
 #endif  
515     SIZELIMIT     SIZELIMIT
516  }  }
517    
518    
519  // In this constructor we need to consider interpolation  // In this constructor we need to consider interpolation
520  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
521      : parent(resultFS(left,right,op), resultShape(left,right,op)),          : parent(resultFS(left,right,op), resultShape(left,right,op)),
522      m_op(op),          m_op(op),
523      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
524  {  {
525  LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)  LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)
526     if ((getOpgroup(op)!=G_BINARY))     if ((getOpgroup(op)!=G_BINARY))
527     {     {
528      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
529     }     }
530    
531     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
532     {     {
533      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
534      Data ltemp(left);          Data ltemp(left);
535      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
536      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
537     }     }
538     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
539     {     {
540      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
541      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
542  LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)  LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)
543     }     }
544     left->operandCheck(*right);     left->operandCheck(*right);
545    
546     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
547     {     {
548      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
549  LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)  LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)
550     }     }
551     else     else
552     {     {
553      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
554  LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)  LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)
555     }     }
556     if (right->isLazy())     if (right->isLazy())
557     {     {
558      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
559  LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)  LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)
560     }     }
561     else     else
562     {     {
563      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
564  LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)  LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)
565     }     }
566     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
567     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
568     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
569     {     {
570      m_readytype='E';          m_readytype='E';
571     }     }
572     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
573     {     {
574      m_readytype='T';          m_readytype='T';
575     }     }
576     else     else
577     {     {
578      m_readytype='C';          m_readytype='C';
579     }     }
580     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());    
    m_buffsRequired=calcBuffs(m_left, m_right,m_op);  
581     m_children=m_left->m_children+m_right->m_children+2;     m_children=m_left->m_children+m_right->m_children+2;
582     m_height=max(m_left->m_height,m_right->m_height)+1;     m_height=max(m_left->m_height,m_right->m_height)+1;
 #ifdef LAZY_NODE_STORAGE  
583     LazyNodeSetup();     LazyNodeSetup();
 #endif  
584     SIZELIMIT     SIZELIMIT
585  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
586  }  }
587    
588  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
589      : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),          : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
590      m_op(op),          m_op(op),
591      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
592      m_transpose(transpose)          m_transpose(transpose)
593  {  {
594     if ((getOpgroup(op)!=G_TENSORPROD))     if ((getOpgroup(op)!=G_TENSORPROD))
595     {     {
596      throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
597     }     }
598     if ((transpose>2) || (transpose<0))     if ((transpose>2) || (transpose<0))
599     {     {
600      throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");          throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
601     }     }
602     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
603     {     {
604      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
605      Data ltemp(left);          Data ltemp(left);
606      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
607      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
608     }     }
609     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
610     {     {
611      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
612      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
613     }     }
614  //    left->operandCheck(*right);  //    left->operandCheck(*right);
615    
616     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
617     {     {
618      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
619     }     }
620     else     else
621     {     {
622      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
623     }     }
624     if (right->isLazy())     if (right->isLazy())
625     {     {
626      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
627     }     }
628     else     else
629     {     {
630      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
631     }     }
632     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
633     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
634     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
635     {     {
636      m_readytype='E';          m_readytype='E';
637     }     }
638     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
639     {     {
640      m_readytype='T';          m_readytype='T';
641     }     }
642     else     else
643     {     {
644      m_readytype='C';          m_readytype='C';
645     }     }
646     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());    
    m_buffsRequired=calcBuffs(m_left, m_right,m_op);  
647     m_children=m_left->m_children+m_right->m_children+2;     m_children=m_left->m_children+m_right->m_children+2;
648     m_height=max(m_left->m_height,m_right->m_height)+1;     m_height=max(m_left->m_height,m_right->m_height)+1;
 #ifdef LAZY_NODE_STORAGE  
649     LazyNodeSetup();     LazyNodeSetup();
 #endif  
650     SIZELIMIT     SIZELIMIT
651  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
652  }  }
653    
654    
655  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
656      : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),          : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),
657      m_op(op),          m_op(op),
658      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
659      m_transpose(0),          m_transpose(0),
660      m_tol(0)          m_tol(0)
661  {  {
662     if ((getOpgroup(op)!=G_NP1OUT_P))     if ((getOpgroup(op)!=G_NP1OUT_P))
663     {     {
664      throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
665     }     }
666     DataLazy_ptr lleft;     DataLazy_ptr lleft;
667     if (!left->isLazy())     if (!left->isLazy())
668     {     {
669      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
670     }     }
671     else     else
672     {     {
673      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
674     }     }
675     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
676     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
677     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
678     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
679     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
680     LazyNodeSetup();     LazyNodeSetup();
 #endif  
681     SIZELIMIT     SIZELIMIT
682  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
683  }  }
684    
685  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)
686      : parent(left->getFunctionSpace(), left->getShape()),          : parent(left->getFunctionSpace(), left->getShape()),
687      m_op(op),          m_op(op),
688      m_axis_offset(0),          m_axis_offset(0),
689      m_transpose(0),          m_transpose(0),
690      m_tol(tol)          m_tol(tol)
691  {  {
692     if ((getOpgroup(op)!=G_UNARY_P))     if ((getOpgroup(op)!=G_UNARY_P))
693     {     {
694      throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");
695     }     }
696     DataLazy_ptr lleft;     DataLazy_ptr lleft;
697     if (!left->isLazy())     if (!left->isLazy())
698     {     {
699      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
700     }     }
701     else     else
702     {     {
703      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
704     }     }
705     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
706     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
707     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
708     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
709     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
710     LazyNodeSetup();     LazyNodeSetup();
 #endif  
711     SIZELIMIT     SIZELIMIT
712  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
713  }  }
714    
715    
716  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)
717      : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),          : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),
718      m_op(op),          m_op(op),
719      m_axis_offset(axis0),          m_axis_offset(axis0),
720      m_transpose(axis1),          m_transpose(axis1),
721      m_tol(0)          m_tol(0)
722  {  {
723     if ((getOpgroup(op)!=G_NP1OUT_2P))     if ((getOpgroup(op)!=G_NP1OUT_2P))
724     {     {
725      throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require two integer parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require two integer parameters.");
726     }     }
727     DataLazy_ptr lleft;     DataLazy_ptr lleft;
728     if (!left->isLazy())     if (!left->isLazy())
729     {     {
730      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
731     }     }
732     else     else
733     {     {
734      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
735     }     }
736     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
737     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
738     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
739     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
740     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
741     LazyNodeSetup();     LazyNodeSetup();
 #endif  
742     SIZELIMIT     SIZELIMIT
743  LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)
744  }  }
745    
746  DataLazy::~DataLazy()  
747    namespace
748  {  {
 #ifdef LAZY_NODE_SETUP  
    delete[] m_sampleids;  
    delete[] m_samples;  
 #endif  
 }  
749    
750        inline int max3(int a, int b, int c)
751        {
752            int t=(a>b?a:b);
753            return (t>c?t:c);
754    
755  int      }
 DataLazy::getBuffsRequired() const  
 {  
     return m_buffsRequired;  
756  }  }
757    
758    DataLazy::DataLazy(DataAbstract_ptr mask, DataAbstract_ptr left, DataAbstract_ptr right/*, double tol*/)
759  size_t          : parent(left->getFunctionSpace(), left->getShape()),
760  DataLazy::getMaxSampleSize() const          m_op(CONDEVAL),
761            m_axis_offset(0),
762            m_transpose(0),
763            m_tol(0)
764  {  {
765      return m_maxsamplesize;  
766       DataLazy_ptr lmask;
767       DataLazy_ptr lleft;
768       DataLazy_ptr lright;
769       if (!mask->isLazy())
770       {
771            lmask=DataLazy_ptr(new DataLazy(mask));
772       }
773       else
774       {
775            lmask=dynamic_pointer_cast<DataLazy>(mask);
776       }
777       if (!left->isLazy())
778       {
779            lleft=DataLazy_ptr(new DataLazy(left));
780       }
781       else
782       {
783            lleft=dynamic_pointer_cast<DataLazy>(left);
784       }
785       if (!right->isLazy())
786       {
787            lright=DataLazy_ptr(new DataLazy(right));
788       }
789       else
790       {
791            lright=dynamic_pointer_cast<DataLazy>(right);
792       }
793       m_readytype=lmask->m_readytype;
794       if ((lleft->m_readytype!=lright->m_readytype) || (lmask->m_readytype!=lleft->m_readytype))
795       {
796            throw DataException("Programmer Error - condEval arguments must have the same readytype");
797       }
798       m_left=lleft;
799       m_right=lright;
800       m_mask=lmask;
801       m_samplesize=getNumDPPSample()*getNoValues();
802       m_children=m_left->m_children+m_right->m_children+m_mask->m_children+1;
803       m_height=max3(m_left->m_height,m_right->m_height,m_mask->m_height)+1;
804       LazyNodeSetup();
805       SIZELIMIT
806    LAZYDEBUG(cout << "(8)Lazy created with " << m_samplesize << endl;)
807  }  }
808    
809    
810    
811  size_t  DataLazy::~DataLazy()
 DataLazy::getSampleBufferSize() const  
812  {  {
813      return m_maxsamplesize*(max(1,m_buffsRequired));     delete[] m_sampleids;
814  }  }
815    
816    
817  /*  /*
818    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
819    This does the work for the collapse method.    This does the work for the collapse method.
820    For reasons of efficiency do not call this method on DataExpanded nodes.    For reasons of efficiency do not call this method on DataExpanded nodes.
821  */  */
822  DataReady_ptr  DataReady_ptr
823  DataLazy::collapseToReady()  DataLazy::collapseToReady() const
824  {  {
825    if (m_readytype=='E')    if (m_readytype=='E')
826    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
827      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
828    }    }
829    if (m_op==IDENTITY)    if (m_op==IDENTITY)
# Line 818  DataLazy::collapseToReady() Line 841  DataLazy::collapseToReady()
841    switch(m_op)    switch(m_op)
842    {    {
843      case ADD:      case ADD:
844      result=left+right;          result=left+right;
845      break;          break;
846      case SUB:            case SUB:          
847      result=left-right;          result=left-right;
848      break;          break;
849      case MUL:            case MUL:          
850      result=left*right;          result=left*right;
851      break;          break;
852      case DIV:            case DIV:          
853      result=left/right;          result=left/right;
854      break;          break;
855      case SIN:      case SIN:
856      result=left.sin();            result=left.sin();      
857      break;          break;
858      case COS:      case COS:
859      result=left.cos();          result=left.cos();
860      break;          break;
861      case TAN:      case TAN:
862      result=left.tan();          result=left.tan();
863      break;          break;
864      case ASIN:      case ASIN:
865      result=left.asin();          result=left.asin();
866      break;          break;
867      case ACOS:      case ACOS:
868      result=left.acos();          result=left.acos();
869      break;          break;
870      case ATAN:      case ATAN:
871      result=left.atan();          result=left.atan();
872      break;          break;
873      case SINH:      case SINH:
874      result=left.sinh();          result=left.sinh();
875      break;          break;
876      case COSH:      case COSH:
877      result=left.cosh();          result=left.cosh();
878      break;          break;
879      case TANH:      case TANH:
880      result=left.tanh();          result=left.tanh();
881      break;          break;
882      case ERF:      case ERF:
883      result=left.erf();          result=left.erf();
884      break;          break;
885     case ASINH:     case ASINH:
886      result=left.asinh();          result=left.asinh();
887      break;          break;
888     case ACOSH:     case ACOSH:
889      result=left.acosh();          result=left.acosh();
890      break;          break;
891     case ATANH:     case ATANH:
892      result=left.atanh();          result=left.atanh();
893      break;          break;
894      case LOG10:      case LOG10:
895      result=left.log10();          result=left.log10();
896      break;          break;
897      case LOG:      case LOG:
898      result=left.log();          result=left.log();
899      break;          break;
900      case SIGN:      case SIGN:
901      result=left.sign();          result=left.sign();
902      break;          break;
903      case ABS:      case ABS:
904      result=left.abs();          result=left.abs();
905      break;          break;
906      case NEG:      case NEG:
907      result=left.neg();          result=left.neg();
908      break;          break;
909      case POS:      case POS:
910      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
911      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
912      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
913      break;          break;
914      case EXP:      case EXP:
915      result=left.exp();          result=left.exp();
916      break;          break;
917      case SQRT:      case SQRT:
918      result=left.sqrt();          result=left.sqrt();
919      break;          break;
920      case RECIP:      case RECIP:
921      result=left.oneOver();          result=left.oneOver();
922      break;          break;
923      case GZ:      case GZ:
924      result=left.wherePositive();          result=left.wherePositive();
925      break;          break;
926      case LZ:      case LZ:
927      result=left.whereNegative();          result=left.whereNegative();
928      break;          break;
929      case GEZ:      case GEZ:
930      result=left.whereNonNegative();          result=left.whereNonNegative();
931      break;          break;
932      case LEZ:      case LEZ:
933      result=left.whereNonPositive();          result=left.whereNonPositive();
934      break;          break;
935      case NEZ:      case NEZ:
936      result=left.whereNonZero(m_tol);          result=left.whereNonZero(m_tol);
937      break;          break;
938      case EZ:      case EZ:
939      result=left.whereZero(m_tol);          result=left.whereZero(m_tol);
940      break;          break;
941      case SYM:      case SYM:
942      result=left.symmetric();          result=left.symmetric();
943      break;          break;
944      case NSYM:      case NSYM:
945      result=left.nonsymmetric();          result=left.antisymmetric();
946      break;          break;
947      case PROD:      case PROD:
948      result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);          result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
949      break;          break;
950      case TRANS:      case TRANS:
951      result=left.transpose(m_axis_offset);          result=left.transpose(m_axis_offset);
952      break;          break;
953      case TRACE:      case TRACE:
954      result=left.trace(m_axis_offset);          result=left.trace(m_axis_offset);
955      break;          break;
956      case SWAP:      case SWAP:
957      result=left.swapaxes(m_axis_offset, m_transpose);          result=left.swapaxes(m_axis_offset, m_transpose);
958      break;          break;
959        case MINVAL:
960            result=left.minval();
961            break;
962        case MAXVAL:
963            result=left.maxval();
964            break;
965      default:      default:
966      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
967    }    }
968    return result.borrowReadyPtr();    return result.borrowReadyPtr();
969  }  }
# Line 946  DataLazy::collapseToReady() Line 975  DataLazy::collapseToReady()
975     the purpose of using DataLazy in the first place).     the purpose of using DataLazy in the first place).
976  */  */
977  void  void
978  DataLazy::collapse()  DataLazy::collapse() const
979  {  {
980    if (m_op==IDENTITY)    if (m_op==IDENTITY)
981    {    {
982      return;          return;
983    }    }
984    if (m_readytype=='E')    if (m_readytype=='E')
985    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
986      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
987    }    }
988    m_id=collapseToReady();    m_id=collapseToReady();
989    m_op=IDENTITY;    m_op=IDENTITY;
990  }  }
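  // For example, a lazy sin(C) built over constant data collapses here: m_id ends up
  // holding the DataReady produced by collapseToReady() and the node becomes IDENTITY.
  // Expanded ('E') data is never collapsed this way; it goes through the resolve path.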
991    
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
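  /*
    A minimal usage sketch of this contract (illustrative only; the buffer size and
    the local names below are assumptions, not the sizing policy used by resolve()):

      DataTypes::ValueType buffer(3*getMaxSampleSize());   // scratch space
      size_t roffset=0;
      const DataTypes::ValueType* res=resolveSample(buffer,0,sampleNo,roffset);
      const double* sample=&((*res)[roffset]);   // points into res - do not deallocate
  */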
 DataTypes::ValueType*  
 DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");  
   }  
   const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);  
   const double* left=&((*vleft)[roffset]);  
   double* result=&(v[offset]);  
   roffset=offset;  
   switch (m_op)  
   {  
     case SIN:    
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);  
     break;  
     case COS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);  
     break;  
     case TAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);  
     break;  
     case ASIN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);  
     break;  
     case ACOS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);  
     break;  
     case ATAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);  
     break;  
     case SINH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);  
     break;  
     case COSH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);  
     break;  
     case TANH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);  
     break;  
     case ERF:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
     break;  
 #endif  
    case ASINH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
     break;  
    case ACOSH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
     break;  
    case ATANH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
     break;  
     case LOG10:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);  
     break;  
     case LOG:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);  
     break;  
     case SIGN:  
     tensor_unary_operation(m_samplesize, left, result, escript::fsign);  
     break;  
     case ABS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);  
     break;  
     case NEG:  
     tensor_unary_operation(m_samplesize, left, result, negate<double>());  
     break;  
     case POS:  
     // it doesn't mean anything for delayed.  
     // it will just trigger a deep copy of the lazy object  
     throw DataException("Programmer error - POS not supported for lazy data.");  
     break;  
     case EXP:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);  
     break;  
     case SQRT:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);  
     break;  
     case RECIP:  
     tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));  
     break;  
     case GZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));  
     break;  
     case LZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));  
     break;  
     case GEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));  
     break;  
     case LEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));  
     break;  
 // There are actually G_UNARY_P but I don't see a compelling reason to treat them differently  
     case NEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));  
     break;  
     case EZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));  
     break;  
   
     default:  
     throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
   
   
   
   
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset=roffset+m_samplesize;  
 LAZYDEBUG(cerr << "subroffset=" << subroffset << endl;)  
   const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t step=getNoValues();  
   switch (m_op)  
   {  
     case SYM:  
     for (loop=0;loop<numsteps;++loop)  
     {  
         DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);  
         subroffset+=step;  
         offset+=step;  
     }  
     break;  
     case NSYM:  
     for (loop=0;loop<numsteps;++loop)  
     {  
         DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);  
         subroffset+=step;  
         offset+=step;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset;  
   const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
 LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)  
 LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t outstep=getNoValues();  
   size_t instep=m_left->getNoValues();  
 LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)  
   switch (m_op)  
   {  
     case TRACE:  
     for (loop=0;loop<numsteps;++loop)  
     {  
 size_t zz=sampleNo*getNumDPPSample()+loop;  
 if (zz==5800)  
 {  
 LAZYDEBUG(cerr << "point=" <<  zz<< endl;)  
 LAZYDEBUG(cerr << "Input to  trace=" << DataTypes::pointToString(*vleft,m_left->getShape(),subroffset,"") << endl;)  
 LAZYDEBUG(cerr << "Offset for point=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << " vs ";)  
 LAZYDEBUG(cerr << subroffset << endl;)  
 LAZYDEBUG(cerr << "output=" << offset << endl;)  
 }  
             DataMaths::trace(*vleft,m_left->getShape(),subroffset, v ,getShape(),offset,m_axis_offset);  
 if (zz==5800)  
 {  
 LAZYDEBUG(cerr << "Result of trace=" << DataTypes::pointToString(v,getShape(),offset,"") << endl;)  
 }  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     case TRANS:  
     for (loop=0;loop<numsteps;++loop)  
     {  
             DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
 /*  
   \brief Compute the value of the expression (unary operation with int params) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_2P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT_2P should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset;  
   const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
 LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)  
 LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t outstep=getNoValues();  
   size_t instep=m_left->getNoValues();  
 LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)  
   switch (m_op)  
   {  
     case SWAP:  
     for (loop=0;loop<numsteps;++loop)  
     {  
             DataMaths::swapaxes(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset, m_transpose);  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUT2P can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
   
 #define PROC_OP(TYPE,X)                               \  
     for (int j=0;j<onumsteps;++j)\  
     {\  
       for (int i=0;i<numsteps;++i,resultp+=resultStep) \  
       { \  
 LAZYDEBUG(cout << "[left,right]=[" << lroffset << "," << rroffset << "]" << endl;)\  
 LAZYDEBUG(cout << "{left,right}={" << (*left)[lroffset] << "," << (*right)[rroffset] << "}\n";)\  
          tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \  
 LAZYDEBUG(cout << " result=      " << resultp[0] << endl;) \  
          lroffset+=leftstep; \  
          rroffset+=rightstep; \  
       }\  
       lroffset+=oleftstep;\  
       rroffset+=orightstep;\  
     }  
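  // For instance, PROC_OP(NO_ARG,plus<double>()) in the case where both children are
  // expanded non-scalars (onumsteps==1, numsteps==1) roughly reduces to a single call
  // (sketch only):
  //   tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]),
  //                           resultp, plus<double>());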
   
 /*  
   \brief Compute the value of the expression (binary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  
 // have already been collapsed to IDENTITY. So we must have at least one expanded child.  
 // If both children are expanded, then we can process them in a single operation (we treat  
  // the whole sample as one big datapoint).  
 // If one of the children is not expanded, then we need to treat each point in the sample  
 // individually.  
 // There is an additional complication when scalar operations are considered.  
 // For example, 2+Vector.  
 // In this case each double within the point is treated individually  
 DataTypes::ValueType*  
 DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  
 {  
 LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  
   
   size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors  
     // first work out which of the children are expanded  
   bool leftExp=(m_left->m_readytype=='E');  
   bool rightExp=(m_right->m_readytype=='E');  
   if (!leftExp && !rightExp)  
   {  
     throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");  
   }  
   bool leftScalar=(m_left->getRank()==0);  
   bool rightScalar=(m_right->getRank()==0);  
   if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))  
   {  
     throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");  
   }  
   size_t leftsize=m_left->getNoValues();  
   size_t rightsize=m_right->getNoValues();  
   size_t chunksize=1;           // how many doubles will be processed in one go  
   int leftstep=0;       // how far should the left offset advance after each step  
   int rightstep=0;  
   int numsteps=0;       // total number of steps for the inner loop  
   int oleftstep=0;  // the o variables refer to the outer loop  
  int orightstep=0; // The outer loop is only required in cases where there is an expanded scalar  
   int onumsteps=1;  
     
   bool LES=(leftExp && leftScalar); // Left is an expanded scalar  
   bool RES=(rightExp && rightScalar);  
   bool LS=(!leftExp && leftScalar); // left is a single scalar  
   bool RS=(!rightExp && rightScalar);  
   bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar  
   bool RN=(!rightExp && !rightScalar);  
   bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar  
   bool REN=(rightExp && !rightScalar);  
   
   if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars  
   {  
     chunksize=m_left->getNumDPPSample()*leftsize;  
     leftstep=0;  
     rightstep=0;  
     numsteps=1;  
   }  
   else if (LES || RES)  
   {  
     chunksize=1;  
     if (LES)        // left is an expanded scalar  
     {  
         if (RS)  
         {  
            leftstep=1;  
            rightstep=0;  
            numsteps=m_left->getNumDPPSample();  
         }  
         else        // RN or REN  
         {  
            leftstep=0;  
            oleftstep=1;  
            rightstep=1;  
            orightstep=(RN ? -(int)rightsize : 0);  
            numsteps=rightsize;  
            onumsteps=m_left->getNumDPPSample();  
         }  
     }  
     else        // right is an expanded scalar  
     {  
         if (LS)  
         {  
            rightstep=1;  
            leftstep=0;  
            numsteps=m_right->getNumDPPSample();  
         }  
         else  
         {  
            rightstep=0;  
            orightstep=1;  
            leftstep=1;  
            oleftstep=(LN ? -(int)leftsize : 0);  
            numsteps=leftsize;  
            onumsteps=m_right->getNumDPPSample();  
         }  
     }  
   }  
   else  // this leaves (LEN, RS), (LEN, RN) and their transposes  
   {  
     if (LEN)    // and Right will be a single value  
     {  
         chunksize=rightsize;  
         leftstep=rightsize;  
         rightstep=0;  
         numsteps=m_left->getNumDPPSample();  
         if (RS)  
         {  
            numsteps*=leftsize;  
         }  
     }  
     else    // REN  
     {  
         chunksize=leftsize;  
         rightstep=leftsize;  
         leftstep=0;  
         numsteps=m_right->getNumDPPSample();  
         if (LS)  
         {  
            numsteps*=rightsize;  
         }  
     }  
   }  
   
   int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0  
     // Get the values of sub-expressions  
   const ValueType* left=m_left->resolveSample(v,offset+getMaxSampleSize(),sampleNo,lroffset);   // see note on  
    // calcBuffs for why we can't put offset as the 2nd param above  
   const ValueType* right=m_right->resolveSample(v,offset+2*getMaxSampleSize(),sampleNo,rroffset); // Note  
     // the right child starts further along.  
 LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)  
 LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)  
 LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)  
 LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)  
 LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)  
 LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)  
 LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)  
   
   
  double* resultp=&(v[offset]);     // results are stored at the vector offset we received  
   switch(m_op)  
   {  
     case ADD:  
         PROC_OP(NO_ARG,plus<double>());  
     break;  
     case SUB:  
     PROC_OP(NO_ARG,minus<double>());  
     break;  
     case MUL:  
     PROC_OP(NO_ARG,multiplies<double>());  
     break;  
     case DIV:  
     PROC_OP(NO_ARG,divides<double>());  
     break;  
     case POW:  
        PROC_OP(double (double,double),::pow);  
     break;  
     default:  
     throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");  
   }  
   roffset=offset;  
   return &v;  
 }  
   
   
   
 /*  
   \brief Compute the value of the expression (tensor product) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  
 // have already been collapsed to IDENTITY. So we must have at least one expanded child.  
 // unlike the other resolve helpers, we must treat these datapoints separately.  
 DataTypes::ValueType*  
 DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  
 {  
 LAZYDEBUG(cout << "Resolve TensorProduct: " << toString()  << " to offset " << offset<< endl;)  
   
   size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors  
     // first work out which of the children are expanded  
   bool leftExp=(m_left->m_readytype=='E');  
   bool rightExp=(m_right->m_readytype=='E');  
   int steps=getNumDPPSample();  
 /*  int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);  
   int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);*/  
   int leftStep=(leftExp? m_left->getNoValues() : 0);        // do not have scalars as input to this method  
   int rightStep=(rightExp?m_right->getNoValues() : 0);  
   
   int resultStep=getNoValues();  
     // Get the values of sub-expressions (leave a gap of one sample for the result).  
   int gap=offset+m_samplesize;    
   
 LAZYDEBUG(cout << "Query left with offset=" << gap << endl;)  
   
   const ValueType* left=m_left->resolveSample(v,gap,sampleNo,lroffset);  
   gap+=m_left->getMaxSampleSize();  
   
   
 LAZYDEBUG(cout << "Query right with offset=" << gap << endl;)  
   
   
   const ValueType* right=m_right->resolveSample(v,gap,sampleNo,rroffset);  
   
 LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;  
 cout << getNoValues() << endl;)  
 LAZYDEBUG(cerr << "Result of left=";)  
 LAZYDEBUG(cerr << "[" << lroffset << " .. " << lroffset+m_left->getNoValues() << "]" << endl;  
   
 for (int i=lroffset, limit=lroffset+(leftExp?m_left->getNoValues()*m_left->getNumDPPSample():m_left->getNoValues());i<limit;++i)  
 {  
 cout << "[" << setw(2) << i-lroffset << "] " << setw(10) << (*left)[i] << " ";  
 if (i%4==0) cout << endl;  
 })  
 LAZYDEBUG(cerr << "\nResult of right=" << endl;)  
 LAZYDEBUG(  
 for (int i=rroffset, limit=rroffset+(rightExp?m_right->getNoValues()*m_right->getNumDPPSample():m_right->getNoValues());i<limit;++i)  
 {  
 cerr << "[" <<  setw(2)<< i-rroffset << "] " << setw(10) << (*right)[i] << " ";  
 if (i%4==0) cout << endl;  
 }  
 cerr << endl;  
 )  
 LAZYDEBUG(cerr << "Post sub calls: " << toString() << endl;)  
 LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)  
 LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)  
 LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)  
 LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)  
 LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)  
   
  double* resultp=&(v[offset]);     // results are stored at the vector offset we received  
   switch(m_op)  
   {  
     case PROD:  
     for (int i=0;i<steps;++i,resultp+=resultStep)  
     {  
   
 LAZYDEBUG(cout << "lroffset=" << lroffset << "rroffset=" << rroffset << endl;)  
 LAZYDEBUG(cout << "l*=" << left << " r*=" << right << endl;)  
 LAZYDEBUG(cout << "m_SL=" << m_SL << " m_SM=" << m_SM << " m_SR=" << m_SR << endl;)  
   
           const double *ptr_0 = &((*left)[lroffset]);  
           const double *ptr_1 = &((*right)[rroffset]);  
   
 LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)  
 LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)  
   
           matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);  
   
 LAZYDEBUG(cout << "Results--\n";  
 {  
   DataVector dv(getNoValues());  
 for (int z=0;z<getNoValues();++z)  
 {  
   cout << "[" << setw(2) << z<< "] " << setw(10) << resultp[z] << " ";  
   if (z%4==0) cout << endl;  
   dv[z]=resultp[z];  
 }  
 cout << endl << DataTypes::pointToString(dv,getShape(),0,"RESLT");  
 cout << "\nWritten to: " << resultp << " resultStep=" << resultStep << endl;  
 }  
 )  
       lroffset+=leftStep;  
       rroffset+=rightStep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");  
   }  
   roffset=offset;  
   return &v;  
 }  
   
   
 #ifdef LAZY_NODE_STORAGE  
   
992  // The result will be stored in m_samples  // The result will be stored in m_samples
993  // The return value is a pointer to the DataVector, offset is the offset within the return value  // The return value is a pointer to the DataVector, offset is the offset within the return value
994  const DataTypes::ValueType*  const DataTypes::RealVectorType*
995  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset) const
996  {  {
997  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
998      // collapse so we have a 'E' node or an IDENTITY for some other type          // collapse so we have a 'E' node or an IDENTITY for some other type
999    if (m_readytype!='E' && m_op!=IDENTITY)    if (m_readytype!='E' && m_op!=IDENTITY)
1000    {    {
1001      collapse();          collapse();
1002    }    }
1003    if (m_op==IDENTITY)      if (m_op==IDENTITY)  
1004    {    {
1005      const ValueType& vec=m_id->getVectorRO();      const RealVectorType& vec=m_id->getVectorRO();
 //     if (m_readytype=='C')  
 //     {  
 //  roffset=0;      // all samples read from the same position  
 //  return &(m_samples);  
 //     }  
1006      roffset=m_id->getPointOffset(sampleNo, 0);      roffset=m_id->getPointOffset(sampleNo, 0);
1007    #ifdef LAZY_STACK_PROF
1008    int x;
1009    if (&x<stackend[omp_get_thread_num()])
1010    {
1011           stackend[omp_get_thread_num()]=&x;
1012    }
1013    #endif
1014      return &(vec);      return &(vec);
1015    }    }
1016    if (m_readytype!='E')    if (m_readytype!='E')
# Line 1615  LAZYDEBUG(cout << "Resolve sample " << t Line 1019  LAZYDEBUG(cout << "Resolve sample " << t
1019    }    }
1020    if (m_sampleids[tid]==sampleNo)    if (m_sampleids[tid]==sampleNo)
1021    {    {
1022      roffset=tid*m_samplesize;          roffset=tid*m_samplesize;
1023      return &(m_samples);        // sample is already resolved          return &(m_samples);            // sample is already resolved
1024    }    }
1025    m_sampleids[tid]=sampleNo;    m_sampleids[tid]=sampleNo;
1026    
1027    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1028    {    {
1029    case G_UNARY:    case G_UNARY:
# Line 1628  LAZYDEBUG(cout << "Resolve sample " << t Line 1033  LAZYDEBUG(cout << "Resolve sample " << t
1033    case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);    case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);
1034    case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);    case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);
1035    case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);    case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);
1036      case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);
1037      case G_CONDEVAL: return resolveNodeCondEval(tid, sampleNo, roffset);
1038    default:    default:
1039      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1040    }    }
1041  }  }
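  // Typical per-thread call pattern for this method (a sketch; 'lz' is a placeholder
  // name for any DataLazy node, not an identifier used elsewhere in this file):
  //   size_t roffset=0;
  //   const DataTypes::RealVectorType* res=lz->resolveNodeSample(omp_get_thread_num(),
  //                                                              sampleNo, roffset);
  //   const double* sample=&((*res)[roffset]);   // points into res (or m_samples)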
1042    
1043  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1044  DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset) const
1045  {  {
1046      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1047      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1048      // processing single points.          // processing single points.
1049      // we will also know we won't get identity nodes          // we will also know we won't get identity nodes
1050    if (m_readytype!='E')    if (m_readytype!='E')
1051    {    {
1052      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
# Line 1648  DataLazy::resolveNodeUnary(int tid, int Line 1055  DataLazy::resolveNodeUnary(int tid, int
1055    {    {
1056      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");
1057    }    }
1058    const DataTypes::ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);    const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);
1059    const double* left=&((*leftres)[roffset]);    const double* left=&((*leftres)[roffset]);
1060    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1061    double* result=&(m_samples[roffset]);    double* result=&(m_samples[roffset]);
1062      escript::ESFunction operation=SINF;
1063    switch (m_op)    switch (m_op)
1064    {    {
1065      case SIN:        case SIN:
1066      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);      operation=SINF;
1067      break;      break;
1068      case COS:      case COS:
1069      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);          operation=COSF;
1070      break;      break;
1071      case TAN:      case TAN:
1072      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);          operation=TANF;
1073      break;      break;
1074      case ASIN:      case ASIN:
1075      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);          operation=ASINF;
1076      break;      break;
1077      case ACOS:      case ACOS:
1078      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);          operation=ACOSF;
1079      break;      break;
1080      case ATAN:      case ATAN:
1081      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);          operation=ATANF;
1082      break;      break;
1083      case SINH:      case SINH:
1084      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);          operation=SINHF;
1085      break;      break;
1086      case COSH:      case COSH:
1087      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);          operation=COSHF;
1088      break;      break;
1089      case TANH:      case TANH:
1090      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);          operation=TANHF;
1091      break;      break;
1092      case ERF:      case ERF:
1093  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ERFF;
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
1094      break;      break;
 #endif  
1095     case ASINH:     case ASINH:
1096  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ASINHF;
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
1097      break;      break;
1098     case ACOSH:     case ACOSH:
1099  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ACOSHF;
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
1100      break;      break;
1101     case ATANH:     case ATANH:
1102  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ATANHF;
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
1103      break;      break;
1104      case LOG10:      case LOG10:
1105      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);          operation=LOG10F;
1106      break;      break;
1107      case LOG:      case LOG:
1108      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);          operation=LOGF;
1109      break;      break;
1110      case SIGN:      case SIGN:
1111      tensor_unary_operation(m_samplesize, left, result, escript::fsign);          operation=SIGNF;
1112      break;      break;
1113      case ABS:      case ABS:
1114      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);          operation=ABSF;
1115      break;      break;
1116      case NEG:      case NEG:
1117      tensor_unary_operation(m_samplesize, left, result, negate<double>());          operation=NEGF;
1118      break;      break;
1119      case POS:      case POS:
1120      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
1121      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
1122      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
1123      break;          break;
1124      case EXP:      case EXP:
1125      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);          operation=EXPF;
1126      break;      break;
1127      case SQRT:      case SQRT:
1128      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);          operation=SQRTF;
1129      break;      break;
1130      case RECIP:      case RECIP:
1131      tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));          operation=INVF;
1132      break;      break;
1133      case GZ:      case GZ:
1134      tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));          operation=GTZEROF;
1135      break;      break;
1136      case LZ:      case LZ:
1137      tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));          operation=LTZEROF;
1138      break;      break;
1139      case GEZ:      case GEZ:
1140      tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));          operation=GEZEROF;
1141      break;      break;
1142      case LEZ:      case LEZ:
1143      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));          operation=LEZEROF;
1144      break;      break;
1145  // There are actually G_UNARY_P but I don't see a compelling reason to treat them differently  // There are actually G_UNARY_P but I don't see a compelling reason to treat them differently
1146      case NEZ:      case NEZ:
1147      tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));          operation=NEQZEROF;
1148      break;      break;
1149      case EZ:      case EZ:
1150      tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));          operation=EQZEROF;
1151      break;      break;
   
1152      default:      default:
1153      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
1154    }    }
1155      tensor_unary_array_operation(m_samplesize,
1156                                 left,
1157                                 result,
1158                                 operation,
1159                                 m_tol);  
1160    return &(m_samples);    return &(m_samples);
1161  }  }
1162    
1163    
1164  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1165  DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset) const
1166    {
1167            // we assume that any collapsing has been done before we get here
1168            // since we only have one argument we don't need to think about only
1169            // processing single points.
1170            // we will also know we won't get identity nodes
1171      if (m_readytype!='E')
1172      {
1173        throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
1174      }
1175      if (m_op==IDENTITY)
1176      {
1177        throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");
1178      }
1179      size_t loffset=0;
1180      const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, loffset);
1181    
1182      roffset=m_samplesize*tid;
1183      unsigned int ndpps=getNumDPPSample();
1184      unsigned int psize=DataTypes::noValues(m_left->getShape());
1185      double* result=&(m_samples[roffset]);
1186      switch (m_op)
1187      {
1188        case MINVAL:
1189            {
1190              for (unsigned int z=0;z<ndpps;++z)
1191              {
1192                FMin op;
1193                *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max());
1194                loffset+=psize;
1195                result++;
1196              }
1197            }
1198            break;
1199        case MAXVAL:
1200            {
1201              for (unsigned int z=0;z<ndpps;++z)
1202              {
1203              FMax op;
1204              *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max()*-1);
1205              loffset+=psize;
1206              result++;
1207              }
1208            }
1209            break;
1210        default:
1211            throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
1212      }
1213      return &(m_samples);
1214    }
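    // A sketch of what the MINVAL branch above computes, one scalar per data point:
    //   result[z] = reductionOpVector(*leftres, m_left->getShape(), loffset0+z*psize,
    //                                 FMin(), numeric_limits<double>::max());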
1215    
1216    const DataTypes::RealVectorType*
1217    DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) const
1218  {  {
1219      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1220      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1221      // processing single points.          // processing single points.
1222    if (m_readytype!='E')    if (m_readytype!='E')
1223    {    {
1224      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");
# Line 1780  DataLazy::resolveNodeNP1OUT(int tid, int Line 1228  DataLazy::resolveNodeNP1OUT(int tid, int
1228      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");
1229    }    }
1230    size_t subroffset;    size_t subroffset;
1231    const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);    const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1232    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1233    size_t loop=0;    size_t loop=0;
1234    size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;    size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
# Line 1789  DataLazy::resolveNodeNP1OUT(int tid, int Line 1237  DataLazy::resolveNodeNP1OUT(int tid, int
1237    switch (m_op)    switch (m_op)
1238    {    {
1239      case SYM:      case SYM:
1240      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1241      {          {
1242          DataMaths::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);              escript::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1243          subroffset+=step;              subroffset+=step;
1244          offset+=step;              offset+=step;
1245      }          }
1246      break;          break;
1247      case NSYM:      case NSYM:
1248      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1249      {          {
1250          DataMaths::nonsymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);              escript::antisymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1251          subroffset+=step;              subroffset+=step;
1252          offset+=step;              offset+=step;
1253      }          }
1254      break;          break;
1255      default:      default:
1256      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
1257    }    }
1258    return &m_samples;    return &m_samples;
1259  }  }
1260    
1261  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1262  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset) const
1263  {  {
1264      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1265      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1266      // processing single points.          // processing single points.
1267    if (m_readytype!='E')    if (m_readytype!='E')
1268    {    {
1269      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");
# Line 1826  DataLazy::resolveNodeNP1OUT_P(int tid, i Line 1274  DataLazy::resolveNodeNP1OUT_P(int tid, i
1274    }    }
1275    size_t subroffset;    size_t subroffset;
1276    size_t offset;    size_t offset;
1277    const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);    const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1278    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1279    offset=roffset;    offset=roffset;
1280    size_t loop=0;    size_t loop=0;
# Line 1836  DataLazy::resolveNodeNP1OUT_P(int tid, i Line 1284  DataLazy::resolveNodeNP1OUT_P(int tid, i
1284    switch (m_op)    switch (m_op)
1285    {    {
1286      case TRACE:      case TRACE:
1287      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1288      {          {
1289              DataMaths::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);              escript::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);
1290          subroffset+=instep;              subroffset+=instep;
1291          offset+=outstep;              offset+=outstep;
1292      }          }
1293      break;          break;
1294      case TRANS:      case TRANS:
1295      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1296      {          {
1297              DataMaths::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);              escript::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);
1298          subroffset+=instep;              subroffset+=instep;
1299          offset+=outstep;              offset+=outstep;
1300      }          }
1301      break;          break;
1302      default:      default:
1303      throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
1304    }    }
1305    return &m_samples;    return &m_samples;
1306  }  }
1307    
1308    
1309  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1310  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset) const
1311  {  {
1312    if (m_readytype!='E')    if (m_readytype!='E')
1313    {    {
# Line 1871  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1319  DataLazy::resolveNodeNP1OUT_2P(int tid,
1319    }    }
1320    size_t subroffset;    size_t subroffset;
1321    size_t offset;    size_t offset;
1322    const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);    const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1323    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1324    offset=roffset;    offset=roffset;
1325    size_t loop=0;    size_t loop=0;
# Line 1881  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1329  DataLazy::resolveNodeNP1OUT_2P(int tid,
1329    switch (m_op)    switch (m_op)
1330    {    {
1331      case SWAP:      case SWAP:
1332      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1333      {          {
1334              DataMaths::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);              escript::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);
1335          subroffset+=instep;              subroffset+=instep;
1336          offset+=outstep;              offset+=outstep;
1337      }          }
1338      break;          break;
1339      default:      default:
1340      throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");
1341    }    }
1342    return &m_samples;    return &m_samples;
1343  }  }
1344    
1345    const DataTypes::RealVectorType*
1346    DataLazy::resolveNodeCondEval(int tid, int sampleNo, size_t& roffset) const
1347    {
1348      if (m_readytype!='E')
1349      {
1350        throw DataException("Programmer error - resolveNodeCondEval should only be called on expanded Data.");
1351      }
1352      if (m_op!=CONDEVAL)
1353      {
1354        throw DataException("Programmer error - resolveNodeCondEval should only be called on CONDEVAL nodes.");
1355      }
1356      size_t subroffset;
1357    
1358      const RealVectorType* maskres=m_mask->resolveNodeSample(tid, sampleNo, subroffset);
1359      const RealVectorType* srcres=0;
1360      if ((*maskres)[subroffset]>0)
1361      {
1362            srcres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1363      }
1364      else
1365      {
1366            srcres=m_right->resolveNodeSample(tid, sampleNo, subroffset);
1367      }
1368    
1369      // Now we need to copy the result
1370    
1371      roffset=m_samplesize*tid;
1372      for (int i=0;i<m_samplesize;++i)
1373      {
1374            m_samples[roffset+i]=(*srcres)[subroffset+i];  
1375      }
1376    
1377      return &m_samples;
1378    }
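    // This is the lazy backend for conditional evaluation: per sample, the first value
    // of the mask sample picks which child's sample is copied through. A usage sketch
    // (the condEval(mask,trueval,falseval) entry point is assumed, not shown here):
    //   Data r=condEval(mask, trueData, falseData);   // may be evaluated lazily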
1379    
1380  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1381  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
# Line 1905  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1386  DataLazy::resolveNodeNP1OUT_2P(int tid,
1386  // There is an additional complication when scalar operations are considered.  // There is an additional complication when scalar operations are considered.
1387  // For example, 2+Vector.  // For example, 2+Vector.
1388  // In this case each double within the point is treated individually  // In this case each double within the point is treated individually
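  // (Worked example: for 2+Vector the case analysis below gives chunksize==1,
  //  leftstep==0, rightstep==1 and numsteps==m_right->getNumDPPSample()*rightsize,
  //  so the expanded child is stepped through one double at a time.)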
1389  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1390  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset) const
1391  {  {
1392  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1393    
1394    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1395      // first work out which of the children are expanded          // first work out which of the children are expanded
1396    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1397    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1398    if (!leftExp && !rightExp)    if (!leftExp && !rightExp)
1399    {    {
1400      throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");          throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
1401    }    }
1402    bool leftScalar=(m_left->getRank()==0);    bool leftScalar=(m_left->getRank()==0);
1403    bool rightScalar=(m_right->getRank()==0);    bool rightScalar=(m_right->getRank()==0);
1404    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1405    {    {
1406      throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");          throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1407    }    }
1408    size_t leftsize=m_left->getNoValues();    size_t leftsize=m_left->getNoValues();
1409    size_t rightsize=m_right->getNoValues();    size_t rightsize=m_right->getNoValues();
1410    size_t chunksize=1;           // how many doubles will be processed in one go    size_t chunksize=1;                   // how many doubles will be processed in one go
1411    int leftstep=0;       // how far should the left offset advance after each step    int leftstep=0;               // how far should the left offset advance after each step
1412    int rightstep=0;    int rightstep=0;
1413    int numsteps=0;       // total number of steps for the inner loop    int numsteps=0;               // total number of steps for the inner loop
1414    int oleftstep=0;  // the o variables refer to the outer loop    int oleftstep=0;      // the o variables refer to the outer loop
1415    int orightstep=0; // The outer loop is only required in cases where there is an extended scalar    int orightstep=0;     // The outer loop is only required in cases where there is an extended scalar
1416    int onumsteps=1;    int onumsteps=1;
1417        
1418    bool LES=(leftExp && leftScalar); // Left is an expanded scalar    bool LES=(leftExp && leftScalar);     // Left is an expanded scalar
1419    bool RES=(rightExp && rightScalar);    bool RES=(rightExp && rightScalar);
1420    bool LS=(!leftExp && leftScalar); // left is a single scalar    bool LS=(!leftExp && leftScalar);     // left is a single scalar
1421    bool RS=(!rightExp && rightScalar);    bool RS=(!rightExp && rightScalar);
1422    bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar    bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1423    bool RN=(!rightExp && !rightScalar);    bool RN=(!rightExp && !rightScalar);
1424    bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar    bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1425    bool REN=(rightExp && !rightScalar);    bool REN=(rightExp && !rightScalar);
1426    
1427    if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars    if ((LES && RES) || (LEN && REN))     // both are Expanded scalars or both are expanded non-scalars
1428    {    {
1429      chunksize=m_left->getNumDPPSample()*leftsize;          chunksize=m_left->getNumDPPSample()*leftsize;
1430      leftstep=0;          leftstep=0;
1431      rightstep=0;          rightstep=0;
1432      numsteps=1;          numsteps=1;
1433    }    }
1434    else if (LES || RES)    else if (LES || RES)
1435    {    {
1436      chunksize=1;          chunksize=1;
1437      if (LES)        // left is an expanded scalar          if (LES)                // left is an expanded scalar
1438      {          {
1439          if (RS)                  if (RS)
1440          {                  {
1441             leftstep=1;                     leftstep=1;
1442             rightstep=0;                     rightstep=0;
1443             numsteps=m_left->getNumDPPSample();                     numsteps=m_left->getNumDPPSample();
1444          }                  }
1445          else        // RN or REN                  else            // RN or REN
1446          {                  {
1447             leftstep=0;                     leftstep=0;
1448             oleftstep=1;                     oleftstep=1;
1449             rightstep=1;                     rightstep=1;
1450             orightstep=(RN ? -(int)rightsize : 0);                     orightstep=(RN ? -(int)rightsize : 0);
1451             numsteps=rightsize;                     numsteps=rightsize;
1452             onumsteps=m_left->getNumDPPSample();                     onumsteps=m_left->getNumDPPSample();
1453          }                  }
1454      }          }
1455      else        // right is an expanded scalar          else            // right is an expanded scalar
1456      {          {
1457          if (LS)                  if (LS)
1458          {                  {
1459             rightstep=1;                     rightstep=1;
1460             leftstep=0;                     leftstep=0;
1461             numsteps=m_right->getNumDPPSample();                     numsteps=m_right->getNumDPPSample();
1462          }                  }
1463          else                  else
1464          {                  {
1465             rightstep=0;                     rightstep=0;
1466             orightstep=1;                     orightstep=1;
1467             leftstep=1;                     leftstep=1;
1468             oleftstep=(LN ? -(int)leftsize : 0);                     oleftstep=(LN ? -(int)leftsize : 0);
1469             numsteps=leftsize;                     numsteps=leftsize;
1470             onumsteps=m_right->getNumDPPSample();                     onumsteps=m_right->getNumDPPSample();
1471          }                  }
1472      }          }
1473    }    }
1474    else  // this leaves (LEN, RS), (LEN, RN) and their transposes    else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1475    {    {
1476      if (LEN)    // and Right will be a single value          if (LEN)        // and Right will be a single value
1477      {          {
1478          chunksize=rightsize;                  chunksize=rightsize;
1479          leftstep=rightsize;                  leftstep=rightsize;
1480          rightstep=0;                  rightstep=0;
1481          numsteps=m_left->getNumDPPSample();                  numsteps=m_left->getNumDPPSample();
1482          if (RS)                  if (RS)
1483          {                  {
1484             numsteps*=leftsize;                     numsteps*=leftsize;
1485          }                  }
1486      }          }
1487      else    // REN          else    // REN
1488      {          {
1489          chunksize=leftsize;                  chunksize=leftsize;
1490          rightstep=leftsize;                  rightstep=leftsize;
1491          leftstep=0;                  leftstep=0;
1492          numsteps=m_right->getNumDPPSample();                  numsteps=m_right->getNumDPPSample();
1493          if (LS)                  if (LS)
1494          {                  {
1495             numsteps*=rightsize;                     numsteps*=rightsize;
1496          }                  }
1497      }          }
1498    }    }
1499    
1500    int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0    int resultStep=max(leftstep,rightstep);       // only one (at most) should be !=0
1501      // Get the values of sub-expressions          // Get the values of sub-expressions
1502    const ValueType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);      const RealVectorType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);      
1503    const ValueType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);    const RealVectorType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);
1504  LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)  LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1505  LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)  LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1506  LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)  LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
# Line 2033  LAZYDEBUG(cout << "Right res["<< rroffse Line 1514  LAZYDEBUG(cout << "Right res["<< rroffse
1514    
1515    
1516    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1517    double* resultp=&(m_samples[roffset]);        // results are stored at the vector offset we recieved    double* resultp=&(m_samples[roffset]);                // results are stored at the vector offset we received
1518    switch(m_op)    switch(m_op)
1519    {    {
1520      case ADD:      case ADD:
1521          PROC_OP(NO_ARG,plus<double>());          //PROC_OP(NO_ARG,plus<double>());
1522      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1523                 &(*left)[0],
1524                 &(*right)[0],
1525                 chunksize,
1526                 onumsteps,
1527                 numsteps,
1528                 resultStep,
1529                 leftstep,
1530                 rightstep,
1531                 oleftstep,
1532                 orightstep,
1533                 lroffset,
1534                 rroffset,
1535                 escript::ESFunction::PLUSF);  
1536            break;
1537      case SUB:      case SUB:
1538      PROC_OP(NO_ARG,minus<double>());        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1539      break;               &(*left)[0],
1540                 &(*right)[0],
1541                 chunksize,
1542                 onumsteps,
1543                 numsteps,
1544                 resultStep,
1545                 leftstep,
1546                 rightstep,
1547                 oleftstep,
1548                 orightstep,
1549                 lroffset,
1550                 rroffset,
1551                 escript::ESFunction::MINUSF);        
1552            //PROC_OP(NO_ARG,minus<double>());
1553            break;
1554      case MUL:      case MUL:
1555      PROC_OP(NO_ARG,multiplies<double>());          //PROC_OP(NO_ARG,multiplies<double>());
1556      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1557                 &(*left)[0],
1558                 &(*right)[0],
1559                 chunksize,
1560                 onumsteps,
1561                 numsteps,
1562                 resultStep,
1563                 leftstep,
1564                 rightstep,
1565                 oleftstep,
1566                 orightstep,
1567                 lroffset,
1568                 rroffset,
1569                 escript::ESFunction::MULTIPLIESF);      
1570            break;
1571      case DIV:      case DIV:
1572      PROC_OP(NO_ARG,divides<double>());          //PROC_OP(NO_ARG,divides<double>());
1573      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1574                 &(*left)[0],
1575                 &(*right)[0],
1576                 chunksize,
1577                 onumsteps,
1578                 numsteps,
1579                 resultStep,
1580                 leftstep,
1581                 rightstep,
1582                 oleftstep,
1583                 orightstep,
1584                 lroffset,
1585                 rroffset,
1586                 escript::ESFunction::DIVIDESF);          
1587            break;
1588      case POW:      case POW:
1589         PROC_OP(double (double,double),::pow);         //PROC_OP(double (double,double),::pow);
1590      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1591                 &(*left)[0],
1592                 &(*right)[0],
1593                 chunksize,
1594                 onumsteps,
1595                 numsteps,
1596                 resultStep,
1597                 leftstep,
1598                 rightstep,
1599                 oleftstep,
1600                 orightstep,
1601                 lroffset,
1602                 rroffset,
1603                 escript::ESFunction::POWF);          
1604            break;
1605      default:      default:
1606      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
1607    }    }
1608  LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)  LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)
1609    return &m_samples;    return &m_samples;
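The binaryOpVectorLazyHelper calls above replace the old PROC_OP macro; the helper itself lives in DataVectorOps and is not part of this diff. As a rough sketch only, the chunksize/numsteps/onumsteps values and the four step sizes computed earlier are assumed to drive a two-level loop of the following shape (the function name, signed offsets and the plain functor argument are illustrative, not the actual escript signature):

    // Sketch (assumption): the loop structure the helper is believed to implement.
    // Offsets are kept signed here because oleftstep/orightstep may be negative.
    #include <cstddef>

    template <typename BinOp>
    void applyLazyBinary(double* res, const double* lhs, const double* rhs,
                         std::ptrdiff_t chunksize, int onumsteps, int numsteps,
                         int resultStep, int leftstep, int rightstep,
                         int oleftstep, int orightstep,
                         std::ptrdiff_t lroffset, std::ptrdiff_t rroffset, BinOp op)
    {
        for (int oi = 0; oi < onumsteps; ++oi)            // outer loop: only >1 when an
        {                                                 // expanded scalar meets a non-scalar
            for (int i = 0; i < numsteps; ++i)            // inner loop over datapoints
            {
                for (std::ptrdiff_t j = 0; j < chunksize; ++j)   // one contiguous run of doubles
                {
                    res[j] = op(lhs[lroffset + j], rhs[rroffset + j]);
                }
                res      += resultStep;
                lroffset += leftstep;
                rroffset += rightstep;
            }
            lroffset += oleftstep;                        // may rewind to reuse the same values
            rroffset += orightstep;
        }
    }

    // e.g. (with <functional>):
    // applyLazyBinary(resultp, &(*left)[0], &(*right)[0], chunksize, onumsteps, numsteps,
    //                 resultStep, leftstep, rightstep, oleftstep, orightstep,
    //                 lroffset, rroffset, std::plus<double>());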
# Line 2062  LAZYDEBUG(cout << "Result res[" << roffs Line 1613  LAZYDEBUG(cout << "Result res[" << roffs
1613  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1614  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1615  // unlike the other resolve helpers, we must treat these datapoints separately.  // unlike the other resolve helpers, we must treat these datapoints separately.
1616  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1617  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset) const
1618  {  {
1619  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1620    
1621    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1622      // first work out which of the children are expanded          // first work out which of the children are expanded
1623    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1624    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1625    int steps=getNumDPPSample();    int steps=getNumDPPSample();
1626    int leftStep=(leftExp? m_left->getNoValues() : 0);        // do not have scalars as input to this method    int leftStep=(leftExp? m_left->getNoValues() : 0);            // do not have scalars as input to this method
1627    int rightStep=(rightExp?m_right->getNoValues() : 0);    int rightStep=(rightExp?m_right->getNoValues() : 0);
1628    
1629    int resultStep=getNoValues();    int resultStep=getNoValues();
1630    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1631    size_t offset=roffset;    size_t offset=roffset;
1632    
1633    const ValueType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);    const RealVectorType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);
1634    
1635    const ValueType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);    const RealVectorType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);
1636    
1637  LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;  LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;
1638  cout << getNoValues() << endl;)  cout << getNoValues() << endl;)
# Line 2095  LAZYDEBUG(cout << "m_samplesize=" << m_s Line 1646  LAZYDEBUG(cout << "m_samplesize=" << m_s
1646  LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)  LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1647  LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)  LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)
1648    
1649    double* resultp=&(m_samples[offset]);     // results are stored at the vector offset we recieved    double* resultp=&(m_samples[offset]);         // results are stored at the vector offset we received
1650    switch(m_op)    switch(m_op)
1651    {    {
1652      case PROD:      case PROD:
1653      for (int i=0;i<steps;++i,resultp+=resultStep)          for (int i=0;i<steps;++i,resultp+=resultStep)
1654      {          {
1655            const double *ptr_0 = &((*left)[lroffset]);            const double *ptr_0 = &((*left)[lroffset]);
1656            const double *ptr_1 = &((*right)[rroffset]);            const double *ptr_1 = &((*right)[rroffset]);
1657    
1658  LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)  LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
1659  LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)  LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)
1660    
1661            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1662    
1663        lroffset+=leftStep;            lroffset+=leftStep;
1664        rroffset+=rightStep;            rroffset+=rightStep;
1665      }          }
1666      break;          break;
1667      default:      default:
1668      throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1669    }    }
1670    roffset=offset;    roffset=offset;
1671    return &m_samples;    return &m_samples;
1672  }  }
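matrix_matrix_product (declared in DataVectorOps.h) performs the per-datapoint contraction for the PROD case; its definition is outside this diff. Assuming m_SL, m_SM and m_SR are the left, shared and right extents of the contraction, and ignoring the m_transpose handling and escript's exact storage order, the operation is essentially a flattened matrix product, sketched below for illustration only:

    // Illustration only: a plain contraction over flattened buffers.  The real
    // matrix_matrix_product also honours the transpose flag and escript's
    // storage conventions, neither of which is reproduced here.
    void naiveMatrixMatrixProduct(int SL, int SM, int SR,
                                  const double* left, const double* right,
                                  double* result)
    {
        for (int i = 0; i < SL; ++i)
        {
            for (int k = 0; k < SR; ++k)
            {
                double acc = 0.0;
                for (int j = 0; j < SM; ++j)      // contract over the shared index
                {
                    acc += left[i * SM + j] * right[j * SR + k];
                }
                result[i * SR + k] = acc;
            }
        }
    }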
 #endif //LAZY_NODE_STORAGE  
1673    
 /*  
   \brief Compute the value of the expression for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
1674    
1675    The return value will be an existing vector so do not deallocate it.  const DataTypes::RealVectorType*
1676  */  DataLazy::resolveSample(int sampleNo, size_t& roffset) const
 // the vector and the offset are a place where the method could write its data if it wishes  
 // it is not obligated to do so. For example, if it has its own storage already, it can use that.  
 // Hence the return value to indicate where the data is actually stored.  
 // Regardless, the storage should be assumed to be used, even if it isn't.  
   
 // the roffset is the offset within the returned vector where the data begins  
 const DataTypes::ValueType*  
 DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  
 {  
 LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)  
     // collapse so we have a 'E' node or an IDENTITY for some other type  
   if (m_readytype!='E' && m_op!=IDENTITY)  
   {  
     collapse();  
   }  
   if (m_op==IDENTITY)    
   {  
     const ValueType& vec=m_id->getVectorRO();  
     if (m_readytype=='C')  
     {  
     roffset=0;  
 LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)  
     return &(vec);  
     }  
     roffset=m_id->getPointOffset(sampleNo, 0);  
 LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)  
     return &(vec);  
   }  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer Error - Collapse did not produce an expanded node.");  
   }  
   switch (getOpgroup(m_op))  
   {  
   case G_UNARY:  
   case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);  
   case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);  
   case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);  
   case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);  
   case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);  
   case G_NP1OUT_2P: return resolveNP1OUT_2P(v, offset, sampleNo, roffset);  
   default:  
     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");  
   }  
   
 }  
   
 const DataTypes::ValueType*  
 DataLazy::resolveSample(BufferGroup& bg, int sampleNo, size_t& roffset)  
1677  {  {
1678  #ifdef _OPENMP  #ifdef _OPENMP
1679      int tid=omp_get_thread_num();          int tid=omp_get_thread_num();
1680  #else  #else
1681      int tid=0;          int tid=0;
1682  #endif  #endif
1683      return resolveSample(bg.getBuffer(tid),bg.getOffset(tid),sampleNo,roffset);  
1684    #ifdef LAZY_STACK_PROF
1685            stackstart[tid]=&tid;
1686            stackend[tid]=&tid;
1687            const DataTypes::RealVectorType* r=resolveNodeSample(tid, sampleNo, roffset);
1688            size_t d=(size_t)stackstart[tid]-(size_t)stackend[tid];
1689            #pragma omp critical
1690            if (d>maxstackuse)
1691            {
1692    cout << "Max resolve Stack use " << d << endl;
1693                    maxstackuse=d;
1694            }
1695            return r;
1696    #else
1697            return resolveNodeSample(tid, sampleNo, roffset);
1698    #endif
1699  }  }
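The LAZY_STACK_PROF branch estimates how much stack the recursive resolve used: the address of a local variable is recorded on entry (stackstart), the deepest address reached during recursion is presumably kept up to date in stackend by the recursive resolveNode* calls, and the difference between the two approximates the stack footprint. A minimal standalone sketch of the same trick, assuming a downward-growing stack (names here are illustrative, not escript's):

    // Sketch: estimating recursion stack use from the spread between local-variable
    // addresses at the outermost and innermost call.  Assumes the stack grows
    // downwards, as it does on the platforms escript typically targets.
    #include <cstddef>
    #include <cstdio>

    static char* stack_top = nullptr;      // address of a local in the outermost frame
    static char* stack_deepest = nullptr;  // smallest address seen so far

    void recurse(int depth)
    {
        char marker;                       // any local works as a stack probe
        if (&marker < stack_deepest)
        {
            stack_deepest = &marker;
        }
        if (depth > 0)
        {
            recurse(depth - 1);
        }
    }

    int main()
    {
        char base;
        stack_top = &base;
        stack_deepest = &base;
        recurse(100);
        std::printf("approx stack use: %zu bytes\n",
                    static_cast<size_t>(stack_top - stack_deepest));
        return 0;
    }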
1700    
1701    
# Line 2195  void Line 1704  void
1704  DataLazy::resolveToIdentity()  DataLazy::resolveToIdentity()
1705  {  {
1706     if (m_op==IDENTITY)     if (m_op==IDENTITY)
1707      return;          return;
 #ifndef LAZY_NODE_STORAGE  
    DataReady_ptr p=resolveVectorWorker();  
 #else  
1708     DataReady_ptr p=resolveNodeWorker();     DataReady_ptr p=resolveNodeWorker();
 #endif  
1709     makeIdentity(p);     makeIdentity(p);
1710  }  }
1711    
# Line 2216  void DataLazy::makeIdentity(const DataRe Line 1721  void DataLazy::makeIdentity(const DataRe
1721     else if(p->isExpanded()) {m_readytype='E';}     else if(p->isExpanded()) {m_readytype='E';}
1722     else if (p->isTagged()) {m_readytype='T';}     else if (p->isTagged()) {m_readytype='T';}
1723     else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}     else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}
    m_buffsRequired=1;  
1724     m_samplesize=p->getNumDPPSample()*p->getNoValues();     m_samplesize=p->getNumDPPSample()*p->getNoValues();
    m_maxsamplesize=m_samplesize;  
1725     m_left.reset();     m_left.reset();
1726     m_right.reset();     m_right.reset();
1727  }  }
# Line 2231  DataLazy::resolve() Line 1734  DataLazy::resolve()
1734      return m_id;      return m_id;
1735  }  }
1736    
 #ifdef LAZY_NODE_STORAGE  
1737    
1738  // This version of resolve uses storage in each node to hold results  /* This is really a static method but I think that caused problems in windows */
1739  DataReady_ptr  void
1740  DataLazy::resolveNodeWorker()  DataLazy::resolveGroupWorker(std::vector<DataLazy*>& dats)
1741  {  {
1742    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally    if (dats.empty())
   {  
     collapse();  
   }  
   if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.  
1743    {    {
1744      return m_id;          return;
1745    }    }
1746      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'    vector<DataLazy*> work;
1747    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));    FunctionSpace fs=dats[0]->getFunctionSpace();
1748    ValueType& resvec=result->getVectorRW();    bool match=true;
1749    DataReady_ptr resptr=DataReady_ptr(result);    for (int i=dats.size()-1;i>=0;--i)
1750      {
1751    int sample;          if (dats[i]->m_readytype!='E')
1752    int totalsamples=getNumSamples();          {
1753    const ValueType* res=0;   // Storage for answer                  dats[i]->collapse();
1754  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)          }
1755    #pragma omp parallel for private(sample,res) schedule(static)          if (dats[i]->m_op!=IDENTITY)
1756    for (sample=0;sample<totalsamples;++sample)          {
1757    {                  work.push_back(dats[i]);
1758      size_t roffset=0;                  if (fs!=dats[i]->getFunctionSpace())
1759                    {
1760                            match=false;
1761                    }
1762            }
1763      }
1764      if (work.empty())
1765      {
1766            return;         // no work to do
1767      }
1768      if (match)    // all functionspaces match.  Yes I realise this is overly strict
1769      {             // it is possible that dats[0] is one of the objects which we discarded and
1770                    // all the other functionspaces match.
1771            vector<DataExpanded*> dep;
1772            vector<RealVectorType*> vecs;
1773            for (int i=0;i<work.size();++i)
1774            {
1775                    dep.push_back(new DataExpanded(fs,work[i]->getShape(), RealVectorType(work[i]->getNoValues())));
1776                    vecs.push_back(&(dep[i]->getVectorRW()));
1777            }
1778            int totalsamples=work[0]->getNumSamples();
1779            const RealVectorType* res=0; // Storage for answer
1780            int sample;
1781            #pragma omp parallel private(sample, res)
1782            {
1783                size_t roffset=0;
1784                #pragma omp for schedule(static)
1785                for (sample=0;sample<totalsamples;++sample)
1786                {
1787                    roffset=0;
1788                    int j;
1789                    for (j=work.size()-1;j>=0;--j)
1790                    {
1791  #ifdef _OPENMP  #ifdef _OPENMP
1792      res=resolveNodeSample(omp_get_thread_num(),sample,roffset);                      res=work[j]->resolveNodeSample(omp_get_thread_num(),sample,roffset);
1793  #else  #else
1794      res=resolveNodeSample(0,sample,roffset);                      res=work[j]->resolveNodeSample(0,sample,roffset);
1795  #endif  #endif
1796  LAZYDEBUG(cout << "Sample #" << sample << endl;)                      RealVectorType::size_type outoffset=dep[j]->getPointOffset(sample,0);
1797  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )                      memcpy(&((*vecs[j])[outoffset]),&((*res)[roffset]),work[j]->m_samplesize*sizeof(RealVectorType::ElementType));
1798      DataVector::size_type outoffset=result->getPointOffset(sample,0);                  }
1799      memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(DataVector::ElementType));              }
1800            }
1801            // Now we need to load the new results as identity ops into the lazy nodes
1802            for (int i=work.size()-1;i>=0;--i)
1803            {
1804                work[i]->makeIdentity(REFCOUNTNS::dynamic_pointer_cast<DataReady>(dep[i]->getPtr()));
1805            }
1806      }
1807      else  // functionspaces do not match
1808      {
1809            for (int i=0;i<work.size();++i)
1810            {
1811                    work[i]->resolveToIdentity();
1812            }
1813    }    }
   return resptr;  
1814  }  }
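resolveGroupWorker batches several lazy expressions: if they all share a FunctionSpace it resolves every sample of every expression in one parallel sweep, otherwise it falls back to resolving each node individually. A hedged usage sketch follows; the member-call form and the way the DataLazy pointers were obtained are assumptions (the method reads like a static helper but is declared as a member):

    // Sketch only: resolving a group of lazy nodes in one pass.
    // lazyA, lazyB, lazyC are assumed to be DataLazy* already in hand;
    // how they are collected in real escript code is not shown in this diff.
    std::vector<DataLazy*> nodes;
    nodes.push_back(lazyA);
    nodes.push_back(lazyB);                // the fast path requires matching FunctionSpaces
    nodes.push_back(lazyC);

    nodes[0]->resolveGroupWorker(nodes);   // afterwards each node is an IDENTITY
                                           // wrapping its freshly resolved DataExpanded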
1815    
 #endif // LAZY_NODE_STORAGE  
1816    
1817  // To simplify the memory management, all threads operate on one large vector, rather than one each.  
1818  // Each sample is evaluated independently and copied into the result DataExpanded.  // This version of resolve uses storage in each node to hold results
1819  DataReady_ptr  DataReady_ptr
1820  DataLazy::resolveVectorWorker()  DataLazy::resolveNodeWorker()
1821  {  {
1822      if (m_readytype!='E')         // if the whole sub-expression is Constant or Tagged, then evaluate it normally
 LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)  
   if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally  
1823    {    {
1824      collapse();      collapse();
1825    }    }
1826    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.    if (m_op==IDENTITY)           // So a lazy expression of Constant or Tagged data will be returned here.
1827    {    {
1828      return m_id;      return m_id;
1829    }    }
1830      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'          // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1831    size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  RealVectorType(getNoValues()));
1832      // storage to evaluate its expression    RealVectorType& resvec=result->getVectorRW();
   int numthreads=1;  
 #ifdef _OPENMP  
   numthreads=omp_get_max_threads();  
 #endif  
   ValueType v(numthreads*threadbuffersize);  
 LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)  
   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));  
   ValueType& resvec=result->getVectorRW();  
1833    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
1834    
1835    int sample;    int sample;
   size_t outoffset;     // offset in the output data  
1836    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1837    const ValueType* res=0;   // Vector storing the answer    const RealVectorType* res=0;       // Storage for answer
   size_t resoffset=0;       // where in the vector to find the answer  
1838  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1839    #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)    #pragma omp parallel private(sample,res)
   for (sample=0;sample<totalsamples;++sample)  
1840    {    {
1841  LAZYDEBUG(cout << "################################# " << sample << endl;)          size_t roffset=0;
1842    #ifdef LAZY_STACK_PROF
1843            stackstart[omp_get_thread_num()]=&roffset;
1844            stackend[omp_get_thread_num()]=&roffset;
1845    #endif
1846            #pragma omp for schedule(static)
1847            for (sample=0;sample<totalsamples;++sample)
1848            {
1849                    roffset=0;
1850  #ifdef _OPENMP  #ifdef _OPENMP
1851      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);                  res=resolveNodeSample(omp_get_thread_num(),sample,roffset);
1852  #else  #else
1853      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.                  res=resolveNodeSample(0,sample,roffset);
1854  #endif  #endif
1855  LAZYDEBUG(cerr << "-------------------------------- " << endl;)  LAZYDEBUG(cout << "Sample #" << sample << endl;)
1856  LAZYDEBUG(cerr<< "Copying sample#" << sample << endl;)  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )
1857      outoffset=result->getPointOffset(sample,0);                  RealVectorType::size_type outoffset=result->getPointOffset(sample,0);
1858  LAZYDEBUG(cerr << "offset=" << outoffset << " from offset=" << resoffset << " " << m_samplesize << " doubles" << endl;)                  memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(RealVectorType::ElementType));
1859      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector          }
1860      {    }
1861  LAZYDEBUG(cerr << "outoffset=" << outoffset << " resoffset=" << resoffset << " " << (*res)[resoffset]<< endl;)  #ifdef LAZY_STACK_PROF
1862      resvec[outoffset]=(*res)[resoffset];    for (int i=0;i<getNumberOfThreads();++i)
1863      }    {
1864  LAZYDEBUG(cerr << DataTypes::pointToString(resvec,getShape(),outoffset-m_samplesize+DataTypes::noValues(getShape()),"Final result:") << endl;)          size_t r=((size_t)stackstart[i] - (size_t)stackend[i]);
1865  LAZYDEBUG(cerr << "*********************************" << endl;)  //      cout << i << " " << stackstart[i] << " .. " << stackend[i] << " = " <<  r << endl;
1866            if (r>maxstackuse)
1867            {
1868                    maxstackuse=r;
1869            }
1870    }    }
1871      cout << "Max resolve Stack use=" << maxstackuse << endl;
1872    #endif
1873    return resptr;    return resptr;
1874  }  }
1875    
# Line 2335  std::string Line 1877  std::string
1877  DataLazy::toString() const  DataLazy::toString() const
1878  {  {
1879    ostringstream oss;    ostringstream oss;
1880    oss << "Lazy Data:";    oss << "Lazy Data: [depth=" << m_height<< "] ";
1881    intoString(oss);    switch (escriptParams.getLAZY_STR_FMT())
1882      {
1883      case 1:       // tree format
1884            oss << endl;
1885            intoTreeString(oss,"");
1886            break;
1887      case 2:       // just the depth
1888            break;
1889      default:
1890            intoString(oss);
1891            break;
1892      }
1893    return oss.str();    return oss.str();
1894  }  }
1895    
# Line 2348  DataLazy::intoString(ostringstream& oss) Line 1901  DataLazy::intoString(ostringstream& oss)
1901    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1902    {    {
1903    case G_IDENTITY:    case G_IDENTITY:
1904      if (m_id->isExpanded())          if (m_id->isExpanded())
1905      {          {
1906         oss << "E";             oss << "E";
1907      }          }
1908      else if (m_id->isTagged())          else if (m_id->isTagged())
1909      {          {
1910        oss << "T";            oss << "T";
1911      }          }
1912      else if (m_id->isConstant())          else if (m_id->isConstant())
1913      {          {
1914        oss << "C";            oss << "C";
1915      }          }
1916      else          else
1917      {          {
1918        oss << "?";            oss << "?";
1919      }          }
1920      oss << '@' << m_id.get();          oss << '@' << m_id.get();
1921      break;          break;
1922    case G_BINARY:    case G_BINARY:
1923      oss << '(';          oss << '(';
1924      m_left->intoString(oss);          m_left->intoString(oss);
1925      oss << ' ' << opToString(m_op) << ' ';          oss << ' ' << opToString(m_op) << ' ';
1926      m_right->intoString(oss);          m_right->intoString(oss);
1927      oss << ')';          oss << ')';
1928      break;          break;
1929    case G_UNARY:    case G_UNARY:
1930    case G_UNARY_P:    case G_UNARY_P:
1931    case G_NP1OUT:    case G_NP1OUT:
1932    case G_NP1OUT_P:    case G_NP1OUT_P:
1933      oss << opToString(m_op) << '(';    case G_REDUCTION:
1934      m_left->intoString(oss);          oss << opToString(m_op) << '(';
1935      oss << ')';          m_left->intoString(oss);
1936      break;          oss << ')';
1937            break;
1938    case G_TENSORPROD:    case G_TENSORPROD:
1939      oss << opToString(m_op) << '(';          oss << opToString(m_op) << '(';
1940      m_left->intoString(oss);          m_left->intoString(oss);
1941      oss << ", ";          oss << ", ";
1942      m_right->intoString(oss);          m_right->intoString(oss);
1943      oss << ')';          oss << ')';
1944      break;          break;
1945    case G_NP1OUT_2P:    case G_NP1OUT_2P:
1946      oss << opToString(m_op) << '(';          oss << opToString(m_op) << '(';
1947      m_left->intoString(oss);          m_left->intoString(oss);
1948      oss << ", " << m_axis_offset << ", " << m_transpose;          oss << ", " << m_axis_offset << ", " << m_transpose;
1949      oss << ')';          oss << ')';
1950      break;          break;
1951      case G_CONDEVAL:
1952            oss << opToString(m_op)<< '(' ;
1953            m_mask->intoString(oss);
1954            oss << " ? ";
1955            m_left->intoString(oss);
1956            oss << " : ";
1957            m_right->intoString(oss);
1958            oss << ')';
1959            break;
1960    default:    default:
1961      oss << "UNKNOWN";          oss << "UNKNOWN";
1962    }    }
1963  }  }
1964    
1965    
1966    void
1967    DataLazy::intoTreeString(ostringstream& oss, string indent) const
1968    {
1969      oss << '[' << m_rank << ':' << setw(3) << m_samplesize << "] " << indent;
1970      switch (getOpgroup(m_op))
1971      {
1972      case G_IDENTITY:
1973            if (m_id->isExpanded())
1974            {
1975               oss << "E";
1976            }
1977            else if (m_id->isTagged())
1978            {
1979              oss << "T";
1980            }
1981            else if (m_id->isConstant())
1982            {
1983              oss << "C";
1984            }
1985            else
1986            {
1987              oss << "?";
1988            }
1989            oss << '@' << m_id.get() << endl;
1990            break;
1991      case G_BINARY:
1992            oss << opToString(m_op) << endl;
1993            indent+='.';
1994            m_left->intoTreeString(oss, indent);
1995            m_right->intoTreeString(oss, indent);
1996            break;
1997      case G_UNARY:
1998      case G_UNARY_P:
1999      case G_NP1OUT:
2000      case G_NP1OUT_P:
2001      case G_REDUCTION:
2002            oss << opToString(m_op) << endl;
2003            indent+='.';
2004            m_left->intoTreeString(oss, indent);
2005            break;
2006      case G_TENSORPROD:
2007            oss << opToString(m_op) << endl;
2008            indent+='.';
2009            m_left->intoTreeString(oss, indent);
2010            m_right->intoTreeString(oss, indent);
2011            break;
2012      case G_NP1OUT_2P:
2013            oss << opToString(m_op) << ", " << m_axis_offset << ", " << m_transpose<< endl;
2014            indent+='.';
2015            m_left->intoTreeString(oss, indent);
2016            break;
2017      default:
2018            oss << "UNKNOWN";
2019      }
2020    }
2021    
2022    
2023  DataAbstract*  DataAbstract*
2024  DataLazy::deepCopy()  DataLazy::deepCopy() const
2025  {  {
2026    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
2027    {    {
2028    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());
2029    case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);    case G_UNARY:
2030    case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);    case G_REDUCTION:      return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
2031      case G_UNARY_P:       return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_tol);
2032      case G_BINARY:        return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
2033    case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);    case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
2034    case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);    case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
2035      case G_NP1OUT_P:   return new DataLazy(m_left->deepCopy()->getPtr(),m_op,  m_axis_offset);
2036      case G_NP1OUT_2P:  return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
2037    default:    default:
2038      throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");          throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
2039    }    }
2040  }  }
2041    
2042    
2043    
2044  // There is no single, natural interpretation of getLength on DataLazy.  // There is no single, natural interpretation of getLength on DataLazy.
2045  // Instances of DataReady can look at the size of their vectors.  // Instances of DataReady can look at the size of their vectors.
2046  // For lazy though, it could be the size the data would be if it were resolved;  // For lazy though, it could be the size the data would be if it were resolved;
2047  // or it could be some function of the lengths of the DataReady instances which  // or it could be some function of the lengths of the DataReady instances which
2048  // form part of the expression.  // form part of the expression.
2049  // Rather than have people making assumptions, I have disabled the method.  // Rather than have people making assumptions, I have disabled the method.
2050  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
2051  DataLazy::getLength() const  DataLazy::getLength() const
2052  {  {
2053    throw DataException("getLength() does not make sense for lazy data.");    throw DataException("getLength() does not make sense for lazy data.");
# Line 2436  DataLazy::getSlice(const DataTypes::Regi Line 2062  DataLazy::getSlice(const DataTypes::Regi
2062    
2063    
2064  // To do this we need to rely on our child nodes  // To do this we need to rely on our child nodes
2065  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
2066  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
2067                   int dataPointNo)                   int dataPointNo)
2068  {  {
2069    if (m_op==IDENTITY)    if (m_op==IDENTITY)
2070    {    {
2071      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
2072    }    }
2073    if (m_readytype!='E')    if (m_readytype!='E')
2074    {    {
2075      collapse();          collapse();
2076      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
2077    }    }
2078    // at this point we do not have an identity node and the expression will be Expanded    // at this point we do not have an identity node and the expression will be Expanded
2079    // so we only need to know which child to ask    // so we only need to know which child to ask
2080    if (m_left->m_readytype=='E')    if (m_left->m_readytype=='E')
2081    {    {
2082      return m_left->getPointOffset(sampleNo,dataPointNo);          return m_left->getPointOffset(sampleNo,dataPointNo);
2083    }    }
2084    else    else
2085    {    {
2086      return m_right->getPointOffset(sampleNo,dataPointNo);          return m_right->getPointOffset(sampleNo,dataPointNo);
2087    }    }
2088  }  }
2089    
2090  // To do this we need to rely on our child nodes  // To do this we need to rely on our child nodes
2091  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
2092  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
2093                   int dataPointNo) const                   int dataPointNo) const
2094  {  {
2095    if (m_op==IDENTITY)    if (m_op==IDENTITY)
2096    {    {
2097      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
2098    }    }
2099    if (m_readytype=='E')    if (m_readytype=='E')
2100    {    {
# Line 2476  DataLazy::getPointOffset(int sampleNo, Line 2102  DataLazy::getPointOffset(int sampleNo,
2102      // so we only need to know which child to ask      // so we only need to know which child to ask
2103      if (m_left->m_readytype=='E')      if (m_left->m_readytype=='E')
2104      {      {
2105      return m_left->getPointOffset(sampleNo,dataPointNo);          return m_left->getPointOffset(sampleNo,dataPointNo);
2106      }      }
2107      else      else
2108      {      {
2109      return m_right->getPointOffset(sampleNo,dataPointNo);          return m_right->getPointOffset(sampleNo,dataPointNo);
2110      }      }
2111    }    }
2112    if (m_readytype=='C')    if (m_readytype=='C')
2113    {    {
2114      return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter          return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
2115    }    }
2116    throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");    throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
2117  }  }
# Line 2495  DataLazy::getPointOffset(int sampleNo, Line 2121  DataLazy::getPointOffset(int sampleNo,
2121  void  void
2122  DataLazy::setToZero()  DataLazy::setToZero()
2123  {  {
2124  //   DataTypes::ValueType v(getNoValues(),0);  //   DataTypes::RealVectorType v(getNoValues(),0);
2125  //   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));  //   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
2126  //   m_op=IDENTITY;  //   m_op=IDENTITY;
2127  //   m_right.reset();    //   m_right.reset();  
# Line 2503  DataLazy::setToZero() Line 2129  DataLazy::setToZero()
2129  //   m_readytype='C';  //   m_readytype='C';
2130  //   m_buffsRequired=1;  //   m_buffsRequired=1;
2131    
2132    privdebug=privdebug;  // to stop the compiler complaining about unused privdebug    (void)privdebug;  // to stop the compiler complaining about unused privdebug
2133    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");
2134  }  }
2135    
2136  bool  bool
2137  DataLazy::actsExpanded() const  DataLazy::actsExpanded() const
2138  {  {
2139      return (m_readytype=='E');          return (m_readytype=='E');
2140  }  }
2141    
2142  }   // end namespace  } // end namespace
2143    
