/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


trunk/escript/src/DataLazy.cpp revision 2514 by jfenwick, Fri Jul 3 00:57:45 2009 UTC  vs.  trunk/escriptcore/src/DataLazy.cpp revision 6469 by jfenwick, Tue Jan 17 07:45:36 2017 UTC
# Line 1  Line 1 
1    
2  /*******************************************************  /*****************************************************************************
3  *  *
4  * Copyright (c) 2003-2008 by University of Queensland  * Copyright (c) 2003-2016 by The University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * http://www.uq.edu.au
 * http://www.uq.edu.au/esscc  
6  *  *
7  * Primary Business: Queensland, Australia  * Primary Business: Queensland, Australia
8  * Licensed under the Open Software License version 3.0  * Licensed under the Apache License, version 2.0
9  * http://www.opensource.org/licenses/osl-3.0.php  * http://www.apache.org/licenses/LICENSE-2.0
10  *  *
11  *******************************************************/  * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12    * Development 2012-2013 by School of Earth Sciences
13    * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14    *
15    *****************************************************************************/
16    
17  #include "DataLazy.h"  #include "DataLazy.h"
 #ifdef USE_NETCDF  
 #include <netcdfcpp.h>  
 #endif  
 #ifdef PASO_MPI  
 #include <mpi.h>  
 #endif  
 #ifdef _OPENMP  
 #include <omp.h>  
 #endif  
 #include "FunctionSpace.h"  
 #include "DataTypes.h"  
18  #include "Data.h"  #include "Data.h"
19  #include "UnaryFuncs.h"     // for escript::fsign  #include "DataTypes.h"
20    #include "EscriptParams.h"
21    #include "FunctionSpace.h"
22  #include "Utils.h"  #include "Utils.h"
23    #include "DataVectorOps.h"
24    
25  #include "EscriptParams.h"  #include <iomanip> // for some fancy formatting in debug
26    
27    using namespace escript::DataTypes;
28    
29  #include <iomanip>      // for some fancy formatting in debug  #define NO_ARG
30    
31  // #define LAZYDEBUG(X) if (privdebug){X;}  // #define LAZYDEBUG(X) if (privdebug){X;}
32  #define LAZYDEBUG(X)  #define LAZYDEBUG(X)
# Line 42  bool privdebug=false; Line 38  bool privdebug=false;
38  #define DISABLEDEBUG privdebug=false;  #define DISABLEDEBUG privdebug=false;
39  }  }
40    
41  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}  //#define SIZELIMIT if ((m_height>escript::escriptParams.getInt("TOO_MANY_LEVELS")) || (m_children>escript::escriptParams.getInt("TOO_MANY_NODES"))) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}
42    
43  #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {resolveToIdentity();}  //#define SIZELIMIT if ((m_height>escript::escriptParams.getInt("TOO_MANY_LEVELS")) || (m_children>escript::escriptParams.getInt("TOO_MANY_NODES"))) {cerr << "SIZE LIMIT EXCEEDED " << m_height << endl;resolveToIdentity();}
44    
45    
46    #define SIZELIMIT \
47        if (m_height > escript::escriptParams.getTooManyLevels()) {\
48            if (escript::escriptParams.getLazyVerbose()) {\
49                cerr << "SIZE LIMIT EXCEEDED height=" << m_height << endl;\
50            }\
51            resolveToIdentity();\
52        }
53    
54  /*  /*
55  How does DataLazy work?  How does DataLazy work?
56  ~~~~~~~~~~~~~~~~~~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~
# Line 58  A special operation, IDENTITY, stores an Line 62  A special operation, IDENTITY, stores an
62  This means that all "internal" nodes in the structure are instances of DataLazy.  This means that all "internal" nodes in the structure are instances of DataLazy.
63    
64  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
65  Note that IDENITY is not considered a unary operation.  Note that IDENTITY is not considered a unary operation.
66    
67  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
68  It must however form a DAG (directed acyclic graph).  It must however form a DAG (directed acyclic graph).
# Line 66  I will refer to individual DataLazy obje Line 70  I will refer to individual DataLazy obje
70    
71  Each node also stores:  Each node also stores:
72  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
73      evaluated.          evaluated.
74  - m_buffsrequired ~ the larged number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
75      evaluate the expression.          evaluate the expression.
76  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
77    
78  When a new node is created, the above values are computed based on the values in the child nodes.  When a new node is created, the above values are computed based on the values in the child nodes.
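
To make the DAG point concrete, here is a minimal sketch of c=a+a using the constructors and the ADD opcode defined later in this file ('a' is assumed to be a DataAbstract_ptr holding ready, non-lazy data; nothing else here is taken from the file):

    DataLazy_ptr leaf(new DataLazy(a));              // IDENTITY leaf wrapping the ready data
    DataLazy_ptr c(new DataLazy(leaf, leaf, ADD));   // c = a + a: both operands reference 'leaf'
    // Because the binary constructor computes m_children from both operand
    // references, a shared child is counted once per reference, not once per node.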
# Line 109  namespace escript Line 113  namespace escript
113  namespace  namespace
114  {  {
115    
 enum ES_opgroup  
 {  
    G_UNKNOWN,  
    G_IDENTITY,  
    G_BINARY,        // pointwise operations with two arguments  
    G_UNARY,     // pointwise operations with one argument  
    G_UNARY_P,       // pointwise operations with one argument, requiring a parameter  
    G_NP1OUT,        // non-pointwise op with one output  
    G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter  
    G_TENSORPROD,    // general tensor product  
    G_NP1OUT_2P      // non-pointwise op with one output requiring two params  
 };  
116    
117    // enabling this will print out whenever the maximum stacksize used by resolve increases
118    // it assumes _OPENMP is also in use
119    //#define LAZY_STACK_PROF
120    
121    
122    
123  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",  #ifndef _OPENMP
124              "sin","cos","tan",    #ifdef LAZY_STACK_PROF
125              "asin","acos","atan","sinh","cosh","tanh","erf",    #undef LAZY_STACK_PROF
126              "asinh","acosh","atanh",    #endif
127              "log10","log","sign","abs","neg","pos","exp","sqrt",  #endif
128              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",  
129              "symmetric","nonsymmetric",  
130              "prod",  #ifdef LAZY_STACK_PROF
131              "transpose", "trace",  std::vector<void*> stackstart(getNumberOfThreads());
132              "swapaxes"};  std::vector<void*> stackend(getNumberOfThreads());
133  int ES_opcount=41;  size_t maxstackuse=0;
134  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  #endif
135              G_UNARY,G_UNARY,G_UNARY, //10  
             G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17  
             G_UNARY,G_UNARY,G_UNARY,                    // 20  
             G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 28  
             G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35  
             G_NP1OUT,G_NP1OUT,  
             G_TENSORPROD,  
             G_NP1OUT_P, G_NP1OUT_P,  
             G_NP1OUT_2P};  
 inline  
 ES_opgroup  
 getOpgroup(ES_optype op)  
 {  
   return opgroups[op];  
 }  
136    
137  // return the FunctionSpace of the result of "left op right"  // return the FunctionSpace of the result of "left op right"
138  FunctionSpace  FunctionSpace
139  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
140  {  {
141      // perhaps this should call interpolate and throw or something?          // perhaps this should call interpolate and throw or something?
142      // maybe we need an interpolate node -          // maybe we need an interpolate node -
143      // that way, if interpolate is required in any other op we can just throw a          // that way, if interpolate is required in any other op we can just throw a
144      // programming error exception.          // programming error exception.
145    
146    FunctionSpace l=left->getFunctionSpace();    FunctionSpace l=left->getFunctionSpace();
147    FunctionSpace r=right->getFunctionSpace();    FunctionSpace r=right->getFunctionSpace();
148    if (l!=r)    if (l!=r)
149    {    {
150      if (r.probeInterpolation(l))      signed char res=r.getDomain()->preferredInterpolationOnDomain(r.getTypeCode(), l.getTypeCode());
151        if (res==1)
152      {      {
153      return l;          return l;
154      }      }
155      if (l.probeInterpolation(r))      if (res==-1)
156      {      {
157      return r;          return r;
158      }      }
159      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
160    }    }
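
The branch above can be read as a small standalone helper (illustrative only; pickResultFS is not a function in this file):

    // res comes from r.getDomain()->preferredInterpolationOnDomain(...):
    //   1  -> keep l and interpolate the right operand onto it
    //  -1  -> keep r and interpolate the left operand onto it
    // anything else means the spaces cannot be reconciled for this pointwise op.
    FunctionSpace pickResultFS(FunctionSpace l, FunctionSpace r, signed char res)
    {
        if (res==1)  return l;
        if (res==-1) return r;
        throw DataException("Cannot interpolate between the FunctionSpaces given.");
    }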
# Line 184  resultFS(DataAbstract_ptr left, DataAbst Line 166  resultFS(DataAbstract_ptr left, DataAbst
166  DataTypes::ShapeType  DataTypes::ShapeType
167  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
168  {  {
169      if (left->getShape()!=right->getShape())          if (left->getShape()!=right->getShape())
170      {          {
171        if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))            if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
172        {            {
173          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");                  throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
174        }            }
175        if (left->getRank()==0)   // we need to allow scalar * anything  
176        {            if (left->getRank()==0)       // we need to allow scalar * anything
177          return right->getShape();            {
178        }                  return right->getShape();
179        if (right->getRank()==0)            }
180        {            if (right->getRank()==0)
181          return left->getShape();            {
182        }                  return left->getShape();
183        throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");            }
184      }            throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
185      return left->getShape();          }
186            return left->getShape();
187  }  }
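
A standalone sketch of the same rule using plain std::vector<int> for shapes (the helper and its name are illustrative, not part of this file):

    #include <vector>
    #include <stdexcept>
    std::vector<int> binaryResultShape(const std::vector<int>& l, const std::vector<int>& r)
    {
        if (l==r)      return l;   // shapes match: nothing to decide
        if (l.empty()) return r;   // scalar op anything -> take the other shape
        if (r.empty()) return l;   // anything op scalar -> take the other shape
        throw std::runtime_error("shapes must match, or one operand must be a scalar");
    }
    // e.g. () op (3,3) gives (3,3); (2,3) op (3,2) throws.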
188    
189  // return the shape for "op left"  // return the shape for "op left"
# Line 208  resultShape(DataAbstract_ptr left, DataA Line 191  resultShape(DataAbstract_ptr left, DataA
191  DataTypes::ShapeType  DataTypes::ShapeType
192  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)
193  {  {
194      switch(op)          switch(op)
195      {          {
196          case TRANS:          case TRANS:
197         {            // for the scoping of variables             {                    // for the scoping of variables
198          const DataTypes::ShapeType& s=left->getShape();                  const DataTypes::ShapeType& s=left->getShape();
199          DataTypes::ShapeType sh;                  DataTypes::ShapeType sh;
200          int rank=left->getRank();                  int rank=left->getRank();
201          if (axis_offset<0 || axis_offset>rank)                  if (axis_offset<0 || axis_offset>rank)
202          {                  {
203                 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);              stringstream e;
204              }              e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
205              for (int i=0; i<rank; i++)              throw DataException(e.str());
206          {          }
207             int index = (axis_offset+i)%rank;          for (int i=0; i<rank; i++)
208                 sh.push_back(s[index]); // Append to new shape                  {
209              }                     int index = (axis_offset+i)%rank;
210          return sh;             sh.push_back(s[index]); // Append to new shape
211         }          }
212      break;                  return sh;
213      case TRACE:             }
214         {          break;
215          int rank=left->getRank();          case TRACE:
216          if (rank<2)             {
217          {                  int rank=left->getRank();
218             throw DataException("Trace can only be computed for objects with rank 2 or greater.");                  if (rank<2)
219          }                  {
220          if ((axis_offset>rank-2) || (axis_offset<0))                     throw DataException("Trace can only be computed for objects with rank 2 or greater.");
221          {                  }
222             throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");                  if ((axis_offset>rank-2) || (axis_offset<0))
223          }                  {
224          if (rank==2)                     throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");
225          {                  }
226             return DataTypes::scalarShape;                  if (rank==2)
227          }                  {
228          else if (rank==3)                     return DataTypes::scalarShape;
229          {                  }
230             DataTypes::ShapeType sh;                  else if (rank==3)
231                 if (axis_offset==0)                  {
232             {                     DataTypes::ShapeType sh;
233                  sh.push_back(left->getShape()[2]);                     if (axis_offset==0)
234                 }                     {
235                 else     // offset==1                          sh.push_back(left->getShape()[2]);
236             {                     }
237              sh.push_back(left->getShape()[0]);                     else         // offset==1
238                 }                     {
239             return sh;                          sh.push_back(left->getShape()[0]);
240          }                     }
241          else if (rank==4)                     return sh;
242          {                  }
243             DataTypes::ShapeType sh;                  else if (rank==4)
244             const DataTypes::ShapeType& s=left->getShape();                  {
245                 if (axis_offset==0)                     DataTypes::ShapeType sh;
246             {                     const DataTypes::ShapeType& s=left->getShape();
247                  sh.push_back(s[2]);                     if (axis_offset==0)
248                  sh.push_back(s[3]);                     {
249                 }                          sh.push_back(s[2]);
250                 else if (axis_offset==1)                          sh.push_back(s[3]);
251             {                     }
252                  sh.push_back(s[0]);                     else if (axis_offset==1)
253                  sh.push_back(s[3]);                     {
254                 }                          sh.push_back(s[0]);
255             else     // offset==2                          sh.push_back(s[3]);
256             {                     }
257              sh.push_back(s[0]);                     else         // offset==2
258              sh.push_back(s[1]);                     {
259             }                          sh.push_back(s[0]);
260             return sh;                          sh.push_back(s[1]);
261          }                     }
262          else        // unknown rank                     return sh;
263          {                  }
264             throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");                  else            // unknown rank
265          }                  {
266         }                     throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
267      break;                  }
268          default:             }
269      throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");          break;
270      }          default:
271            throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
272            }
273  }  }
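
The TRANS case above simply rotates the shape left by axis_offset; as a standalone sketch (illustrative helper, not part of this file):

    #include <vector>
    std::vector<int> transShape(const std::vector<int>& s, int axis_offset)
    {
        std::vector<int> sh;
        const int rank=static_cast<int>(s.size());
        for (int i=0; i<rank; ++i)
            sh.push_back(s[(axis_offset+i)%rank]);
        return sh;   // e.g. (2,3,4) with axis_offset=1 -> (3,4,2)
    }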
274    
275  DataTypes::ShapeType  DataTypes::ShapeType
# Line 302  SwapShape(DataAbstract_ptr left, const i Line 287  SwapShape(DataAbstract_ptr left, const i
287          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
288       }       }
289       if (axis0<0 || axis0>rank-1) {       if (axis0<0 || axis0>rank-1) {
290          throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);          stringstream e;
291            e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
292            throw DataException(e.str());
293       }       }
294       if (axis1<0 || axis1>rank-1) {       if (axis1<0 || axis1>rank-1) {
295           throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);          stringstream e;
296            e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
297            throw DataException(e.str());
298       }       }
299       if (axis0 == axis1) {       if (axis0 == axis1) {
300           throw DataException("Error - Data::swapaxes: axis indices must be different.");           throw DataException("Error - Data::swapaxes: axis indices must be different.");
# Line 336  SwapShape(DataAbstract_ptr left, const i Line 325  SwapShape(DataAbstract_ptr left, const i
325  DataTypes::ShapeType  DataTypes::ShapeType
326  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
327  {  {
328                
329    // Get rank and shape of inputs    // Get rank and shape of inputs
330    int rank0 = left->getRank();    int rank0 = left->getRank();
331    int rank1 = right->getRank();    int rank1 = right->getRank();
# Line 345  GTPShape(DataAbstract_ptr left, DataAbst Line 334  GTPShape(DataAbstract_ptr left, DataAbst
334    
335    // Prepare for the loops of the product and verify compatibility of shapes    // Prepare for the loops of the product and verify compatibility of shapes
336    int start0=0, start1=0;    int start0=0, start1=0;
337    if (transpose == 0)       {}    if (transpose == 0)           {}
338    else if (transpose == 1)  { start0 = axis_offset; }    else if (transpose == 1)      { start0 = axis_offset; }
339    else if (transpose == 2)  { start1 = rank1-axis_offset; }    else if (transpose == 2)      { start1 = rank1-axis_offset; }
340    else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }    else                          { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
341    
342    if (rank0<axis_offset)    if (rank0<axis_offset)
343    {    {
344      throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");          throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
345    }    }
346    
347    // Adjust the shapes for transpose    // Adjust the shapes for transpose
348    DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather    DataTypes::ShapeType tmpShape0(rank0);        // pre-sizing the vectors rather
349    DataTypes::ShapeType tmpShape1(rank1);    // than using push_back    DataTypes::ShapeType tmpShape1(rank1);        // than using push_back
350    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
351    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }
352    
353    // Prepare for the loops of the product    // Prepare for the loops of the product
354    SL=1, SM=1, SR=1;    SL=1, SM=1, SR=1;
355    for (int i=0; i<rank0-axis_offset; i++)   {    for (int i=0; i<rank0-axis_offset; i++)       {
356      SL *= tmpShape0[i];      SL *= tmpShape0[i];
357    }    }
358    for (int i=rank0-axis_offset; i<rank0; i++)   {    for (int i=rank0-axis_offset; i<rank0; i++)   {
359      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
360        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
361      }      }
362      SM *= tmpShape0[i];      SM *= tmpShape0[i];
363    }    }
364    for (int i=axis_offset; i<rank1; i++)     {    for (int i=axis_offset; i<rank1; i++)         {
365      SR *= tmpShape1[i];      SR *= tmpShape1[i];
366    }    }
367    
368    // Define the shape of the output (rank of shape is the sum of the loop ranges below)    // Define the shape of the output (rank of shape is the sum of the loop ranges below)
369    DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      
370    {         // block to limit the scope of out_index    {                     // block to limit the scope of out_index
371       int out_index=0;       int out_index=0;
372       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
373       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
# Line 394  GTPShape(DataAbstract_ptr left, DataAbst Line 383  GTPShape(DataAbstract_ptr left, DataAbst
383    return shape2;    return shape2;
384  }  }
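
For the common case transpose==0 the computation above reduces to the following standalone sketch (illustrative helper, not part of this file). For a plain matrix product, axis_offset==1, it gives SL=rows(left), SM=the shared dimension, SR=cols(right), and an output shape of (rows(left), cols(right)):

    #include <vector>
    #include <stdexcept>
    std::vector<int> gtpShape(const std::vector<int>& s0, const std::vector<int>& s1,
                              int axis_offset, int& SL, int& SM, int& SR)
    {
        const int rank0=(int)s0.size(), rank1=(int)s1.size();
        SL=SM=SR=1;
        for (int i=0; i<rank0-axis_offset; ++i) SL*=s0[i];          // left "free" axes
        for (int i=rank0-axis_offset; i<rank0; ++i)                 // contracted axes
        {
            if (s0[i]!=s1[i-(rank0-axis_offset)])
                throw std::runtime_error("incompatible shapes");
            SM*=s0[i];
        }
        for (int i=axis_offset; i<rank1; ++i) SR*=s1[i];            // right "free" axes
        std::vector<int> out;          // rank = rank0+rank1-2*axis_offset
        for (int i=0; i<rank0-axis_offset; ++i) out.push_back(s0[i]);
        for (int i=axis_offset; i<rank1; ++i)   out.push_back(s1[i]);
        return out;   // (p,q) x (q,r), axis_offset=1 -> (p,r) with SL=p, SM=q, SR=r
    }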
385    
386  // determine the number of samples required to evaluate an expression combining left and right  }       // end anonymous namespace
 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  
 // The same goes for G_TENSORPROD  
 // It might seem that pointwise binary ops (G_BINARY) could be written over the top of the lefts.  
 // This would be true were it not for the possibility that the LHS could be a scalar which needs to be examined  
 // multiple times  
 int  
 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  
 {  
    switch(getOpgroup(op))  
    {  
    case G_IDENTITY: return 1;  
    case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_UNARY:  
    case G_UNARY_P: return max(left->getBuffsRequired(),1);  
    case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);  
    case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);  
    case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_NP1OUT_2P: return 1+max(left->getBuffsRequired(),1);  
    default:  
     throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");  
    }  
 }  
   
   
 }   // end anonymous namespace  
   
   
   
 // Return a string representing the operation  
 const std::string&  
 opToString(ES_optype op)  
 {  
   if (op<0 || op>=ES_opcount)  
   {  
     op=UNKNOWNOP;  
   }  
   return ES_opstrings[op];  
 }  
387    
 #ifdef LAZY_NODE_STORAGE  
388  void DataLazy::LazyNodeSetup()  void DataLazy::LazyNodeSetup()
389  {  {
390  #ifdef _OPENMP  #ifdef _OPENMP
# Line 451  void DataLazy::LazyNodeSetup() Line 401  void DataLazy::LazyNodeSetup()
401      m_sampleids[0]=-1;      m_sampleids[0]=-1;
402  #endif  // _OPENMP  #endif  // _OPENMP
403  }  }
 #endif   // LAZY_NODE_STORAGE  
404    
405    
406  // Creates an identity node  // Creates an identity node
407  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
408      : parent(p->getFunctionSpace(),p->getShape())          : parent(p->getFunctionSpace(),p->getShape())
409  #ifdef LAZY_NODE_STORAGE          ,m_sampleids(0),
410      ,m_sampleids(0),          m_samples(1)
     m_samples(1)  
 #endif  
411  {  {
412     if (p->isLazy())     if (p->isLazy())
413     {     {
414      // I don't want identity of Lazy.          // I don't want identity of Lazy.
415      // Question: Why would that be so bad?          // Question: Why would that be so bad?
416      // Answer: We assume that the child of ID is something we can call getVector on          // Answer: We assume that the child of ID is something we can call getVector on
417      throw DataException("Programmer error - attempt to create identity from a DataLazy.");          throw DataException("Programmer error - attempt to create identity from a DataLazy.");
418     }     }
419     else     else
420     {     {
421      p->makeLazyShared();          DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);
422      DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);          makeIdentity(dr);
     makeIdentity(dr);  
423  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)
424     }     }
425  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
426  }  }
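
Each operation constructor below repeats the same wrapping step for its operands; it could be summarised by a helper like this (a sketch only, no such helper exists in this file):

    // Leaves of the expression DAG are always IDENTITY nodes holding ready data,
    // so every interior node can assume its children are DataLazy.
    DataLazy_ptr wrapAsLazy(DataAbstract_ptr p)
    {
        if (p->isLazy())
            return dynamic_pointer_cast<DataLazy>(p);
        return DataLazy_ptr(new DataLazy(p));   // identity node around the ready data
    }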
427    
428  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
429      : parent(left->getFunctionSpace(),left->getShape()),          : parent(left->getFunctionSpace(),(getOpgroup(op)!=G_REDUCTION)?left->getShape():DataTypes::scalarShape),
430      m_op(op),          m_op(op),
431      m_axis_offset(0),          m_axis_offset(0),
432      m_transpose(0),          m_transpose(0),
433      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
434  {  {
435     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT) && (getOpgroup(op)!=G_REDUCTION))
436     {     {
437      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
438     }     }
439    
440     DataLazy_ptr lleft;     DataLazy_ptr lleft;
441     if (!left->isLazy())     if (!left->isLazy())
442     {     {
443      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
444     }     }
445     else     else
446     {     {
447      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
448     }     }
449     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
450     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
451     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
452     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
453     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
454     LazyNodeSetup();     LazyNodeSetup();
 #endif  
455     SIZELIMIT     SIZELIMIT
456  }  }
457    
458    
459  // In this constructor we need to consider interpolation  // In this constructor we need to consider interpolation
460  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
461      : parent(resultFS(left,right,op), resultShape(left,right,op)),          : parent(resultFS(left,right,op), resultShape(left,right,op)),
462      m_op(op),          m_op(op),
463      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
464  {  {
465  LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)  LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)
466     if ((getOpgroup(op)!=G_BINARY))     if ((getOpgroup(op)!=G_BINARY))
467     {     {
468      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
469     }     }
470    
471     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
472     {     {
473      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
474      Data ltemp(left);          Data ltemp(left);
475      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
476      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
477     }     }
478     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
479     {     {
480      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
481      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
482  LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)  LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)
483     }     }
484     left->operandCheck(*right);     left->operandCheck(*right);
485    
486     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
487     {     {
488      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
489  LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)  LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)
490     }     }
491     else     else
492     {     {
493      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
494  LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)  LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)
495     }     }
496     if (right->isLazy())     if (right->isLazy())
497     {     {
498      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
499  LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)  LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)
500     }     }
501     else     else
502     {     {
503      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
504  LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)  LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)
505     }     }
506     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
507     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
508     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
509     {     {
510      m_readytype='E';          m_readytype='E';
511     }     }
512     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
513     {     {
514      m_readytype='T';          m_readytype='T';
515     }     }
516     else     else
517     {     {
518      m_readytype='C';          m_readytype='C';
519     }     }
520     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());    
    m_buffsRequired=calcBuffs(m_left, m_right,m_op);  
521     m_children=m_left->m_children+m_right->m_children+2;     m_children=m_left->m_children+m_right->m_children+2;
522     m_height=max(m_left->m_height,m_right->m_height)+1;     m_height=max(m_left->m_height,m_right->m_height)+1;
 #ifdef LAZY_NODE_STORAGE  
523     LazyNodeSetup();     LazyNodeSetup();
 #endif  
524     SIZELIMIT     SIZELIMIT
525  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
526  }  }
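
The readytype bookkeeping above follows a simple promotion rule, Expanded ('E') over Tagged ('T') over Constant ('C'); the same rule as a standalone sketch (illustrative helper, not part of this file):

    char promoteReadyType(char lt, char rt)
    {
        if (lt=='E' || rt=='E') return 'E';   // any expanded operand forces an expanded result
        if (lt=='T' || rt=='T') return 'T';   // otherwise tagged beats constant
        return 'C';
    }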
527    
528  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
529      : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),          : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
530      m_op(op),          m_op(op),
531      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
532      m_transpose(transpose)          m_transpose(transpose)
533  {  {
534     if ((getOpgroup(op)!=G_TENSORPROD))     if ((getOpgroup(op)!=G_TENSORPROD))
535     {     {
536      throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
537     }     }
538     if ((transpose>2) || (transpose<0))     if ((transpose>2) || (transpose<0))
539     {     {
540      throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");          throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
541     }     }
542     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
543     {     {
544      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
545      Data ltemp(left);          Data ltemp(left);
546      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
547      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
548     }     }
549     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
550     {     {
551      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
552      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
553     }     }
554  //    left->operandCheck(*right);  //    left->operandCheck(*right);
555    
556     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
557     {     {
558      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
559     }     }
560     else     else
561     {     {
562      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
563     }     }
564     if (right->isLazy())     if (right->isLazy())
565     {     {
566      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
567     }     }
568     else     else
569     {     {
570      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
571     }     }
572     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
573     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
574     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
575     {     {
576      m_readytype='E';          m_readytype='E';
577     }     }
578     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
579     {     {
580      m_readytype='T';          m_readytype='T';
581     }     }
582     else     else
583     {     {
584      m_readytype='C';          m_readytype='C';
585     }     }
586     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());    
    m_buffsRequired=calcBuffs(m_left, m_right,m_op);  
587     m_children=m_left->m_children+m_right->m_children+2;     m_children=m_left->m_children+m_right->m_children+2;
588     m_height=max(m_left->m_height,m_right->m_height)+1;     m_height=max(m_left->m_height,m_right->m_height)+1;
 #ifdef LAZY_NODE_STORAGE  
589     LazyNodeSetup();     LazyNodeSetup();
 #endif  
590     SIZELIMIT     SIZELIMIT
591  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
592  }  }
593    
594    
595  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
596      : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),          : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),
597      m_op(op),          m_op(op),
598      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
599      m_transpose(0),          m_transpose(0),
600      m_tol(0)          m_tol(0)
601  {  {
602     if ((getOpgroup(op)!=G_NP1OUT_P))     if ((getOpgroup(op)!=G_NP1OUT_P))
603     {     {
604      throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
605     }     }
606     DataLazy_ptr lleft;     DataLazy_ptr lleft;
607     if (!left->isLazy())     if (!left->isLazy())
608     {     {
609      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
610     }     }
611     else     else
612     {     {
613      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
614     }     }
615     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
616     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
617     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
618     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
619     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
620     LazyNodeSetup();     LazyNodeSetup();
 #endif  
621     SIZELIMIT     SIZELIMIT
622  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
623  }  }
624    
625  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)
626      : parent(left->getFunctionSpace(), left->getShape()),          : parent(left->getFunctionSpace(), left->getShape()),
627      m_op(op),          m_op(op),
628      m_axis_offset(0),          m_axis_offset(0),
629      m_transpose(0),          m_transpose(0),
630      m_tol(tol)          m_tol(tol)
631  {  {
632     if ((getOpgroup(op)!=G_UNARY_P))     if ((getOpgroup(op)!=G_UNARY_P))
633     {     {
634      throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");
635     }     }
636     DataLazy_ptr lleft;     DataLazy_ptr lleft;
637     if (!left->isLazy())     if (!left->isLazy())
638     {     {
639      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
640     }     }
641     else     else
642     {     {
643      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
644     }     }
645     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
646     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
647     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
648     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
649     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
650     LazyNodeSetup();     LazyNodeSetup();
 #endif  
651     SIZELIMIT     SIZELIMIT
652  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
653  }  }
654    
655    
656  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)
657      : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),          : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),
658      m_op(op),          m_op(op),
659      m_axis_offset(axis0),          m_axis_offset(axis0),
660      m_transpose(axis1),          m_transpose(axis1),
661      m_tol(0)          m_tol(0)
662  {  {
663     if ((getOpgroup(op)!=G_NP1OUT_2P))     if ((getOpgroup(op)!=G_NP1OUT_2P))
664     {     {
665      throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require two integer parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require two integer parameters.");
666     }     }
667     DataLazy_ptr lleft;     DataLazy_ptr lleft;
668     if (!left->isLazy())     if (!left->isLazy())
669     {     {
670      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
671     }     }
672     else     else
673     {     {
674      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
675     }     }
676     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
677     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
678     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
679     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
680     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
681     LazyNodeSetup();     LazyNodeSetup();
 #endif  
682     SIZELIMIT     SIZELIMIT
683  LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)
684  }  }
685    
686  DataLazy::~DataLazy()  
687    namespace
688  {  {
 #ifdef LAZY_NODE_SETUP  
    delete[] m_sampleids;  
    delete[] m_samples;  
 #endif  
 }  
689    
690        inline int max3(int a, int b, int c)
691        {
692            int t=(a>b?a:b);
693            return (t>c?t:c);
694    
695  int      }
 DataLazy::getBuffsRequired() const  
 {  
     return m_buffsRequired;  
696  }  }
697    
698    DataLazy::DataLazy(DataAbstract_ptr mask, DataAbstract_ptr left, DataAbstract_ptr right/*, double tol*/)
699  size_t          : parent(left->getFunctionSpace(), left->getShape()),
700  DataLazy::getMaxSampleSize() const          m_op(CONDEVAL),
701            m_axis_offset(0),
702            m_transpose(0),
703            m_tol(0)
704  {  {
705      return m_maxsamplesize;  
706       DataLazy_ptr lmask;
707       DataLazy_ptr lleft;
708       DataLazy_ptr lright;
709       if (!mask->isLazy())
710       {
711            lmask=DataLazy_ptr(new DataLazy(mask));
712       }
713       else
714       {
715            lmask=dynamic_pointer_cast<DataLazy>(mask);
716       }
717       if (!left->isLazy())
718       {
719            lleft=DataLazy_ptr(new DataLazy(left));
720       }
721       else
722       {
723            lleft=dynamic_pointer_cast<DataLazy>(left);
724       }
725       if (!right->isLazy())
726       {
727            lright=DataLazy_ptr(new DataLazy(right));
728       }
729       else
730       {
731            lright=dynamic_pointer_cast<DataLazy>(right);
732       }
733       m_readytype=lmask->m_readytype;
734       if ((lleft->m_readytype!=lright->m_readytype) || (lmask->m_readytype!=lleft->m_readytype))
735       {
736            throw DataException("Programmer Error - condEval arguments must have the same readytype");
737       }
738       m_left=lleft;
739       m_right=lright;
740       m_mask=lmask;
741       m_samplesize=getNumDPPSample()*getNoValues();
742       m_children=m_left->m_children+m_right->m_children+m_mask->m_children+1;
743       m_height=max3(m_left->m_height,m_right->m_height,m_mask->m_height)+1;
744       LazyNodeSetup();
745       SIZELIMIT
746    LAZYDEBUG(cout << "(8)Lazy created with " << m_samplesize << endl;)
747  }  }
748    
749    
750    
751  size_t  DataLazy::~DataLazy()
 DataLazy::getSampleBufferSize() const  
752  {  {
753      return m_maxsamplesize*(max(1,m_buffsRequired));     delete[] m_sampleids;
754  }  }
755    
756    
757  /*  /*
758    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
759    This does the work for the collapse method.    This does the work for the collapse method.
760    For reasons of efficiency do not call this method on DataExpanded nodes.    For reasons of efficiency do not call this method on DataExpanded nodes.
761  */  */
762  DataReady_ptr  DataReady_ptr
763  DataLazy::collapseToReady()  DataLazy::collapseToReady() const
764  {  {
765    if (m_readytype=='E')    if (m_readytype=='E')
766    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
767      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
768    }    }
769    if (m_op==IDENTITY)    if (m_op==IDENTITY)
# Line 818  DataLazy::collapseToReady() Line 781  DataLazy::collapseToReady()
781    switch(m_op)    switch(m_op)
782    {    {
783      case ADD:      case ADD:
784      result=left+right;          result=left+right;
785      break;          break;
786      case SUB:            case SUB:          
787      result=left-right;          result=left-right;
788      break;          break;
789      case MUL:            case MUL:          
790      result=left*right;          result=left*right;
791      break;          break;
792      case DIV:            case DIV:          
793      result=left/right;          result=left/right;
794      break;          break;
795      case SIN:      case SIN:
796      result=left.sin();            result=left.sin();      
797      break;          break;
798      case COS:      case COS:
799      result=left.cos();          result=left.cos();
800      break;          break;
801      case TAN:      case TAN:
802      result=left.tan();          result=left.tan();
803      break;          break;
804      case ASIN:      case ASIN:
805      result=left.asin();          result=left.asin();
806      break;          break;
807      case ACOS:      case ACOS:
808      result=left.acos();          result=left.acos();
809      break;          break;
810      case ATAN:      case ATAN:
811      result=left.atan();          result=left.atan();
812      break;          break;
813      case SINH:      case SINH:
814      result=left.sinh();          result=left.sinh();
815      break;          break;
816      case COSH:      case COSH:
817      result=left.cosh();          result=left.cosh();
818      break;          break;
819      case TANH:      case TANH:
820      result=left.tanh();          result=left.tanh();
821      break;          break;
822      case ERF:      case ERF:
823      result=left.erf();          result=left.erf();
824      break;          break;
825     case ASINH:     case ASINH:
826      result=left.asinh();          result=left.asinh();
827      break;          break;
828     case ACOSH:     case ACOSH:
829      result=left.acosh();          result=left.acosh();
830      break;          break;
831     case ATANH:     case ATANH:
832      result=left.atanh();          result=left.atanh();
833      break;          break;
834      case LOG10:      case LOG10:
835      result=left.log10();          result=left.log10();
836      break;          break;
837      case LOG:      case LOG:
838      result=left.log();          result=left.log();
839      break;          break;
840      case SIGN:      case SIGN:
841      result=left.sign();          result=left.sign();
842      break;          break;
843      case ABS:      case ABS:
844      result=left.abs();          result=left.abs();
845      break;          break;
846      case NEG:      case NEG:
847      result=left.neg();          result=left.neg();
848      break;          break;
849      case POS:      case POS:
850      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
851      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
852      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
853      break;          break;
854      case EXP:      case EXP:
855      result=left.exp();          result=left.exp();
856      break;          break;
857      case SQRT:      case SQRT:
858      result=left.sqrt();          result=left.sqrt();
859      break;          break;
860      case RECIP:      case RECIP:
861      result=left.oneOver();          result=left.oneOver();
862      break;          break;
863      case GZ:      case GZ:
864      result=left.wherePositive();          result=left.wherePositive();
865      break;          break;
866      case LZ:      case LZ:
867      result=left.whereNegative();          result=left.whereNegative();
868      break;          break;
869      case GEZ:      case GEZ:
870      result=left.whereNonNegative();          result=left.whereNonNegative();
871      break;          break;
872      case LEZ:      case LEZ:
873      result=left.whereNonPositive();          result=left.whereNonPositive();
874      break;          break;
875      case NEZ:      case NEZ:
876      result=left.whereNonZero(m_tol);          result=left.whereNonZero(m_tol);
877      break;          break;
878      case EZ:      case EZ:
879      result=left.whereZero(m_tol);          result=left.whereZero(m_tol);
880      break;          break;
881      case SYM:      case SYM:
882      result=left.symmetric();          result=left.symmetric();
883      break;          break;
884      case NSYM:      case NSYM:
885      result=left.nonsymmetric();          result=left.antisymmetric();
886      break;          break;
887      case PROD:      case PROD:
888      result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);          result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
889      break;          break;
890      case TRANS:      case TRANS:
891      result=left.transpose(m_axis_offset);          result=left.transpose(m_axis_offset);
892      break;          break;
893      case TRACE:      case TRACE:
894      result=left.trace(m_axis_offset);          result=left.trace(m_axis_offset);
895      break;          break;
896      case SWAP:      case SWAP:
897      result=left.swapaxes(m_axis_offset, m_transpose);          result=left.swapaxes(m_axis_offset, m_transpose);
898            break;
899        case MINVAL:
900            result=left.minval();
901            break;
902        case MAXVAL:
903            result=left.maxval();
904            break;
905        case HER:
906        result=left.hermitian();
907      break;      break;
908      default:      default:
909      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
910    }    }
911    return result.borrowReadyPtr();    return result.borrowReadyPtr();
912  }  }
# Line 946  DataLazy::collapseToReady() Line 918  DataLazy::collapseToReady()
918     the purpose of using DataLazy in the first place).     the purpose of using DataLazy in the first place).
919  */  */
920  void  void
921  DataLazy::collapse()  DataLazy::collapse() const
922  {  {
923    if (m_op==IDENTITY)    if (m_op==IDENTITY)
924    {    {
925      return;          return;
926    }    }
927    if (m_readytype=='E')    if (m_readytype=='E')
928    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
929      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
930    }    }
931    m_id=collapseToReady();    m_id=collapseToReady();
932    m_op=IDENTITY;    m_op=IDENTITY;
933  }  }
934    
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");  
   }  
   const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);  
   const double* left=&((*vleft)[roffset]);  
   double* result=&(v[offset]);  
   roffset=offset;  
   switch (m_op)  
   {  
     case SIN:    
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);  
     break;  
     case COS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);  
     break;  
     case TAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);  
     break;  
     case ASIN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);  
     break;  
     case ACOS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);  
     break;  
     case ATAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);  
     break;  
     case SINH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);  
     break;  
     case COSH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);  
     break;  
     case TANH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);  
     break;  
     case ERF:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
     break;  
 #endif  
    case ASINH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
     break;  
    case ACOSH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
     break;  
    case ATANH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
     break;  
     case LOG10:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);  
     break;  
     case LOG:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);  
     break;  
     case SIGN:  
     tensor_unary_operation(m_samplesize, left, result, escript::fsign);  
     break;  
     case ABS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);  
     break;  
     case NEG:  
     tensor_unary_operation(m_samplesize, left, result, negate<double>());  
     break;  
     case POS:  
    // POS doesn't mean anything for delayed (lazy) evaluation.  
    // it will just trigger a deep copy of the lazy object  
     throw DataException("Programmer error - POS not supported for lazy data.");  
     break;  
     case EXP:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);  
     break;  
     case SQRT:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);  
     break;  
     case RECIP:  
     tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));  
     break;  
     case GZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));  
     break;  
     case LZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));  
     break;  
     case GEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));  
     break;  
     case LEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));  
     break;  
// These are actually G_UNARY_P ops, but I don't see a compelling reason to treat them differently  
     case NEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));  
     break;  
     case EZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));  
     break;  
   
     default:  
     throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
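The switch above dispatches an elementwise functor over one sample. A minimal sketch of what such a kernel does, assuming a simple pointer-based signature (not the actual tensor_unary_operation template):

// Assumed-signature sketch: apply a unary functor to each of n doubles in a sample.
#include <cmath>
#include <cstddef>
#include <iostream>

template <typename UnaryFunction>
void tensor_unary_sketch(std::size_t n, const double* in, double* out,
                         UnaryFunction f) {
    for (std::size_t i = 0; i < n; ++i)
        out[i] = f(in[i]);               // one value per datapoint entry
}

int main() {
    const double in[4] = {0.0, 0.5, 1.0, 2.0};
    double out[4];
    tensor_unary_sketch(4, in, out, [](double x) { return std::sin(x); });
    for (double d : out) std::cout << d << " ";
    std::cout << "\n";
}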
   
   
   
   
   
   
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset=roffset+m_samplesize;  
 LAZYDEBUG(cerr << "subroffset=" << subroffset << endl;)  
   const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t step=getNoValues();  
   switch (m_op)  
   {  
     case SYM:  
     for (loop=0;loop<numsteps;++loop)  
     {  
         DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);  
         subroffset+=step;  
         offset+=step;  
     }  
     break;  
     case NSYM:  
     for (loop=0;loop<numsteps;++loop)  
     {  
         DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);  
         subroffset+=step;  
         offset+=step;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset;  
   const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
 LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)  
 LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t outstep=getNoValues();  
   size_t instep=m_left->getNoValues();  
 LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)  
   switch (m_op)  
   {  
     case TRACE:  
     for (loop=0;loop<numsteps;++loop)  
     {  
 size_t zz=sampleNo*getNumDPPSample()+loop;  
 if (zz==5800)  
 {  
 LAZYDEBUG(cerr << "point=" <<  zz<< endl;)  
 LAZYDEBUG(cerr << "Input to  trace=" << DataTypes::pointToString(*vleft,m_left->getShape(),subroffset,"") << endl;)  
 LAZYDEBUG(cerr << "Offset for point=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << " vs ";)  
 LAZYDEBUG(cerr << subroffset << endl;)  
 LAZYDEBUG(cerr << "output=" << offset << endl;)  
 }  
             DataMaths::trace(*vleft,m_left->getShape(),subroffset, v ,getShape(),offset,m_axis_offset);  
 if (zz==5800)  
 {  
 LAZYDEBUG(cerr << "Result of trace=" << DataTypes::pointToString(v,getShape(),offset,"") << endl;)  
 }  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     case TRANS:  
     for (loop=0;loop<numsteps;++loop)  
     {  
             DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
 /*  
   \brief Compute the value of the expression (unary operation with int params) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_2P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT_2P should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset;  
   const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
 LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)  
 LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t outstep=getNoValues();  
   size_t instep=m_left->getNoValues();  
 LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)  
   switch (m_op)  
   {  
     case SWAP:  
     for (loop=0;loop<numsteps;++loop)  
     {  
             DataMaths::swapaxes(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset, m_transpose);  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUT2P can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
   
 #define PROC_OP(TYPE,X)                               \  
     for (int j=0;j<onumsteps;++j)\  
     {\  
       for (int i=0;i<numsteps;++i,resultp+=resultStep) \  
       { \  
 LAZYDEBUG(cout << "[left,right]=[" << lroffset << "," << rroffset << "]" << endl;)\  
 LAZYDEBUG(cout << "{left,right}={" << (*left)[lroffset] << "," << (*right)[rroffset] << "}\n";)\  
          tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \  
 LAZYDEBUG(cout << " result=      " << resultp[0] << endl;) \  
          lroffset+=leftstep; \  
          rroffset+=rightstep; \  
       }\  
       lroffset+=oleftstep;\  
       rroffset+=orightstep;\  
     }  
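PROC_OP runs an outer loop of onumsteps passes and an inner loop of numsteps chunks, advancing the operand offsets by leftstep/rightstep inside the inner loop and by oleftstep/orightstep between passes. A hedged worked example with plain arrays, using the step values resolveBinary would pick for an expanded-scalar plus single-vector (LES, RN) addition:

// Worked example of the PROC_OP stepping scheme (plain arrays, not escript types).
#include <iostream>

int main() {
    const int ndpp = 2;                        // datapoints per sample
    const int rightsize = 3;                   // components of the right operand
    double left[ndpp]       = {10.0, 20.0};    // expanded scalar: 1 value per dp
    double right[rightsize] = {1.0, 2.0, 3.0}; // single value shared by all dps
    double result[ndpp * rightsize];

    // Step values as chosen for the (LES, RN) case:
    int leftstep = 0, oleftstep = 1;           // left advances once per datapoint
    int rightstep = 1, orightstep = -rightsize;// right rewinds after each datapoint
    int numsteps = rightsize, onumsteps = ndpp, resultStep = 1;

    int lro = 0, rro = 0;
    double* resultp = result;
    for (int j = 0; j < onumsteps; ++j) {
        for (int i = 0; i < numsteps; ++i, resultp += resultStep) {
            *resultp = left[lro] + right[rro]; // ADD in this sketch
            lro += leftstep;
            rro += rightstep;
        }
        lro += oleftstep;
        rro += orightstep;
    }
    for (double d : result) std::cout << d << " ";   // 11 12 13 21 22 23
    std::cout << "\n";
}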
   
 /*  
   \brief Compute the value of the expression (binary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  
 // have already been collapsed to IDENTITY. So we must have at least one expanded child.  
 // If both children are expanded, then we can process them in a single operation (we treat  
// the whole sample as one big datapoint).  
 // If one of the children is not expanded, then we need to treat each point in the sample  
 // individually.  
 // There is an additional complication when scalar operations are considered.  
 // For example, 2+Vector.  
 // In this case each double within the point is treated individually  
 DataTypes::ValueType*  
 DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  
 {  
 LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  
   
   size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors  
     // first work out which of the children are expanded  
   bool leftExp=(m_left->m_readytype=='E');  
   bool rightExp=(m_right->m_readytype=='E');  
   if (!leftExp && !rightExp)  
   {  
     throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");  
   }  
   bool leftScalar=(m_left->getRank()==0);  
   bool rightScalar=(m_right->getRank()==0);  
   if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))  
   {  
     throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");  
   }  
   size_t leftsize=m_left->getNoValues();  
   size_t rightsize=m_right->getNoValues();  
   size_t chunksize=1;           // how many doubles will be processed in one go  
   int leftstep=0;       // how far should the left offset advance after each step  
   int rightstep=0;  
   int numsteps=0;       // total number of steps for the inner loop  
   int oleftstep=0;  // the o variables refer to the outer loop  
   int orightstep=0; // The outer loop is only required in cases where there is an extended scalar  
   int onumsteps=1;  
     
   bool LES=(leftExp && leftScalar); // Left is an expanded scalar  
   bool RES=(rightExp && rightScalar);  
   bool LS=(!leftExp && leftScalar); // left is a single scalar  
   bool RS=(!rightExp && rightScalar);  
   bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar  
   bool RN=(!rightExp && !rightScalar);  
   bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar  
   bool REN=(rightExp && !rightScalar);  
   
   if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars  
   {  
     chunksize=m_left->getNumDPPSample()*leftsize;  
     leftstep=0;  
     rightstep=0;  
     numsteps=1;  
   }  
   else if (LES || RES)  
   {  
     chunksize=1;  
     if (LES)        // left is an expanded scalar  
     {  
         if (RS)  
         {  
            leftstep=1;  
            rightstep=0;  
            numsteps=m_left->getNumDPPSample();  
         }  
         else        // RN or REN  
         {  
            leftstep=0;  
            oleftstep=1;  
            rightstep=1;  
            orightstep=(RN ? -(int)rightsize : 0);  
            numsteps=rightsize;  
            onumsteps=m_left->getNumDPPSample();  
         }  
     }  
     else        // right is an expanded scalar  
     {  
         if (LS)  
         {  
            rightstep=1;  
            leftstep=0;  
            numsteps=m_right->getNumDPPSample();  
         }  
         else  
         {  
            rightstep=0;  
            orightstep=1;  
            leftstep=1;  
            oleftstep=(LN ? -(int)leftsize : 0);  
            numsteps=leftsize;  
            onumsteps=m_right->getNumDPPSample();  
         }  
     }  
   }  
   else  // this leaves (LEN, RS), (LEN, RN) and their transposes  
   {  
     if (LEN)    // and Right will be a single value  
     {  
         chunksize=rightsize;  
         leftstep=rightsize;  
         rightstep=0;  
         numsteps=m_left->getNumDPPSample();  
         if (RS)  
         {  
            numsteps*=leftsize;  
         }  
     }  
     else    // REN  
     {  
         chunksize=leftsize;  
         rightstep=leftsize;  
         leftstep=0;  
         numsteps=m_right->getNumDPPSample();  
         if (LS)  
         {  
            numsteps*=rightsize;  
         }  
     }  
   }  
   
   int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0  
     // Get the values of sub-expressions  
   const ValueType* left=m_left->resolveSample(v,offset+getMaxSampleSize(),sampleNo,lroffset);   // see note on  
     // calcBufss for why we can't put offset as the 2nd param above  
   const ValueType* right=m_right->resolveSample(v,offset+2*getMaxSampleSize(),sampleNo,rroffset); // Note  
     // the right child starts further along.  
 LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)  
 LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)  
 LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)  
 LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)  
 LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)  
 LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)  
 LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)  
   
   
  double* resultp=&(v[offset]);     // results are stored at the vector offset we received  
   switch(m_op)  
   {  
     case ADD:  
         PROC_OP(NO_ARG,plus<double>());  
     break;  
     case SUB:  
     PROC_OP(NO_ARG,minus<double>());  
     break;  
     case MUL:  
     PROC_OP(NO_ARG,multiplies<double>());  
     break;  
     case DIV:  
     PROC_OP(NO_ARG,divides<double>());  
     break;  
     case POW:  
        PROC_OP(double (double,double),::pow);  
     break;  
     default:  
     throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");  
   }  
   roffset=offset;  
   return &v;  
 }  
   
   
   
 /*  
   \brief Compute the value of the expression (tensor product) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  
 // have already been collapsed to IDENTITY. So we must have at least one expanded child.  
 // unlike the other resolve helpers, we must treat these datapoints separately.  
 DataTypes::ValueType*  
 DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  
 {  
 LAZYDEBUG(cout << "Resolve TensorProduct: " << toString()  << " to offset " << offset<< endl;)  
   
   size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors  
     // first work out which of the children are expanded  
   bool leftExp=(m_left->m_readytype=='E');  
   bool rightExp=(m_right->m_readytype=='E');  
   int steps=getNumDPPSample();  
 /*  int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);  
   int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);*/  
   int leftStep=(leftExp? m_left->getNoValues() : 0);        // do not have scalars as input to this method  
   int rightStep=(rightExp?m_right->getNoValues() : 0);  
   
   int resultStep=getNoValues();  
     // Get the values of sub-expressions (leave a gap of one sample for the result).  
   int gap=offset+m_samplesize;    
   
 LAZYDEBUG(cout << "Query left with offset=" << gap << endl;)  
   
   const ValueType* left=m_left->resolveSample(v,gap,sampleNo,lroffset);  
   gap+=m_left->getMaxSampleSize();  
   
   
 LAZYDEBUG(cout << "Query right with offset=" << gap << endl;)  
   
   
   const ValueType* right=m_right->resolveSample(v,gap,sampleNo,rroffset);  
   
 LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;  
 cout << getNoValues() << endl;)  
 LAZYDEBUG(cerr << "Result of left=";)  
 LAZYDEBUG(cerr << "[" << lroffset << " .. " << lroffset+m_left->getNoValues() << "]" << endl;  
   
 for (int i=lroffset, limit=lroffset+(leftExp?m_left->getNoValues()*m_left->getNumDPPSample():m_left->getNoValues());i<limit;++i)  
 {  
 cout << "[" << setw(2) << i-lroffset << "] " << setw(10) << (*left)[i] << " ";  
 if (i%4==0) cout << endl;  
 })  
 LAZYDEBUG(cerr << "\nResult of right=" << endl;)  
 LAZYDEBUG(  
 for (int i=rroffset, limit=rroffset+(rightExp?m_right->getNoValues()*m_right->getNumDPPSample():m_right->getNoValues());i<limit;++i)  
 {  
 cerr << "[" <<  setw(2)<< i-rroffset << "] " << setw(10) << (*right)[i] << " ";  
 if (i%4==0) cout << endl;  
 }  
 cerr << endl;  
 )  
 LAZYDEBUG(cerr << "Post sub calls: " << toString() << endl;)  
 LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)  
 LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)  
 LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)  
 LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)  
 LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)  
   
  double* resultp=&(v[offset]);     // results are stored at the vector offset we received  
   switch(m_op)  
   {  
     case PROD:  
     for (int i=0;i<steps;++i,resultp+=resultStep)  
     {  
   
 LAZYDEBUG(cout << "lroffset=" << lroffset << "rroffset=" << rroffset << endl;)  
 LAZYDEBUG(cout << "l*=" << left << " r*=" << right << endl;)  
 LAZYDEBUG(cout << "m_SL=" << m_SL << " m_SM=" << m_SM << " m_SR=" << m_SR << endl;)  
   
           const double *ptr_0 = &((*left)[lroffset]);  
           const double *ptr_1 = &((*right)[rroffset]);  
   
 LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)  
 LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)  
   
           matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);  
   
 LAZYDEBUG(cout << "Results--\n";  
 {  
   DataVector dv(getNoValues());  
 for (int z=0;z<getNoValues();++z)  
 {  
   cout << "[" << setw(2) << z<< "] " << setw(10) << resultp[z] << " ";  
   if (z%4==0) cout << endl;  
   dv[z]=resultp[z];  
 }  
 cout << endl << DataTypes::pointToString(dv,getShape(),0,"RESLT");  
 cout << "\nWritten to: " << resultp << " resultStep=" << resultStep << endl;  
 }  
 )  
       lroffset+=leftStep;  
       rroffset+=rightStep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");  
   }  
   roffset=offset;  
   return &v;  
 }  
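The PROD case contracts an SL x SM block against an SM x SR block for every datapoint. A standalone sketch of that contraction under a row-major layout assumption (the escript matrix_matrix_product additionally handles transposes and its own storage order):

// Rank-reducing product sketch: C[i][k] = sum_j A[i][j]*B[j][k].
#include <iostream>

void matmul_sketch(int SL, int SM, int SR,
                   const double* A, const double* B, double* C) {
    for (int i = 0; i < SL; ++i)
        for (int k = 0; k < SR; ++k) {
            double sum = 0.0;
            for (int j = 0; j < SM; ++j)
                sum += A[i * SM + j] * B[j * SR + k];  // contract middle index
            C[i * SR + k] = sum;
        }
}

int main() {
    const double A[2 * 3] = {1, 2, 3, 4, 5, 6};        // 2x3
    const double B[3 * 2] = {7, 8, 9, 10, 11, 12};     // 3x2
    double C[2 * 2];
    matmul_sketch(2, 3, 2, A, B, C);
    for (double d : C) std::cout << d << " ";          // 58 64 139 154
    std::cout << "\n";
}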
   
   
 #ifdef LAZY_NODE_STORAGE  
   
935  // The result will be stored in m_samples  // The result will be stored in m_samples
936  // The return value is a pointer to the DataVector, offset is the offset within the return value  // The return value is a pointer to the DataVector, offset is the offset within the return value
937  const DataTypes::ValueType*  const DataTypes::RealVectorType*
938  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset) const
939  {  {
940  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
941      // collapse so we have a 'E' node or an IDENTITY for some other type          // collapse so we have a 'E' node or an IDENTITY for some other type
942    if (m_readytype!='E' && m_op!=IDENTITY)    if (m_readytype!='E' && m_op!=IDENTITY)
943    {    {
944      collapse();          collapse();
945    }    }
946    if (m_op==IDENTITY)      if (m_op==IDENTITY)  
947    {    {
948      const ValueType& vec=m_id->getVectorRO();      const RealVectorType& vec=m_id->getVectorRO();
 //     if (m_readytype=='C')  
 //     {  
 //  roffset=0;      // all samples read from the same position  
 //  return &(m_samples);  
 //     }  
949      roffset=m_id->getPointOffset(sampleNo, 0);      roffset=m_id->getPointOffset(sampleNo, 0);
950    #ifdef LAZY_STACK_PROF
951    int x;
952    if (&x<stackend[omp_get_thread_num()])
953    {
954           stackend[omp_get_thread_num()]=&x;
955    }
956    #endif
957      return &(vec);      return &(vec);
958    }    }
959    if (m_readytype!='E')    if (m_readytype!='E')
# Line 1615  LAZYDEBUG(cout << "Resolve sample " << t Line 962  LAZYDEBUG(cout << "Resolve sample " << t
962    }    }
963    if (m_sampleids[tid]==sampleNo)    if (m_sampleids[tid]==sampleNo)
964    {    {
965      roffset=tid*m_samplesize;          roffset=tid*m_samplesize;
966      return &(m_samples);        // sample is already resolved          return &(m_samples);            // sample is already resolved
967    }    }
968    m_sampleids[tid]=sampleNo;    m_sampleids[tid]=sampleNo;
969    
970    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
971    {    {
972    case G_UNARY:    case G_UNARY:
# Line 1628  LAZYDEBUG(cout << "Resolve sample " << t Line 976  LAZYDEBUG(cout << "Resolve sample " << t
976    case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);    case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);
977    case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);    case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);
978    case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);    case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);
979      case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);
980      case G_CONDEVAL: return resolveNodeCondEval(tid, sampleNo, roffset);
981    default:    default:
982      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
983    }    }
984  }  }
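resolveNodeSample keeps one resolved sample per worker thread: the result lives in m_samples at offset tid*m_samplesize and is tagged in m_sampleids, so a repeated request for the same sample number is a cache hit. A simplified sketch with hypothetical types:

// Simplified per-thread sample cache (hypothetical, not the DataLazy members).
#include <iostream>
#include <vector>

struct NodeCache {
    std::vector<double> samples;   // numthreads * samplesize doubles
    std::vector<int> sampleids;    // which sample each thread currently holds
    int samplesize;

    NodeCache(int numthreads, int size)
        : samples(numthreads * size), sampleids(numthreads, -1), samplesize(size) {}

    // Return the buffer offset for (tid, sampleNo), recomputing only on a miss.
    int resolve(int tid, int sampleNo) {
        int offset = tid * samplesize;
        if (sampleids[tid] == sampleNo)
            return offset;                       // already resolved
        sampleids[tid] = sampleNo;
        for (int i = 0; i < samplesize; ++i)     // stand-in for the real work
            samples[offset + i] = sampleNo * 100.0 + i;
        return offset;
    }
};

int main() {
    NodeCache cache(2, 4);
    int off = cache.resolve(0, 7);     // computes sample 7 for thread 0
    off = cache.resolve(0, 7);         // cache hit: reuses the stored values
    std::cout << cache.samples[off] << "\n";   // 700
}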
985    
986  const DataTypes::ValueType*  const DataTypes::RealVectorType*
987  DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset) const
988  {  {
989      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
990      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
991      // processing single points.          // processing single points.
992      // we will also know we won't get identity nodes          // we will also know we won't get identity nodes
993    if (m_readytype!='E')    if (m_readytype!='E')
994    {    {
995      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
# Line 1648  DataLazy::resolveNodeUnary(int tid, int Line 998  DataLazy::resolveNodeUnary(int tid, int
998    {    {
999      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");
1000    }    }
1001    const DataTypes::ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);    const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);
1002    const double* left=&((*leftres)[roffset]);    const double* left=&((*leftres)[roffset]);
1003    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1004    double* result=&(m_samples[roffset]);    double* result=&(m_samples[roffset]);
1005    switch (m_op)    if (m_op==POS)
1006    {    {
1007      case SIN:        // this should be prevented earlier
1008      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);      // operation is meaningless for lazy
1009      break;          throw DataException("Programmer error - POS not supported for lazy data.");    
1010      case COS:    }
1011      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);    tensor_unary_array_operation(m_samplesize,
1012      break;                               left,
1013      case TAN:                               result,
1014      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);                               m_op,
1015      break;                               m_tol);  
1016      case ASIN:    return &(m_samples);
1017      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);  }
     break;  
     case ACOS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);  
     break;  
     case ATAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);  
     break;  
     case SINH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);  
     break;  
     case COSH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);  
     break;  
     case TANH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);  
     break;  
     case ERF:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
     break;  
 #endif  
    case ASINH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
     break;  
    case ACOSH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
     break;  
    case ATANH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
     break;  
     case LOG10:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);  
     break;  
     case LOG:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);  
     break;  
     case SIGN:  
     tensor_unary_operation(m_samplesize, left, result, escript::fsign);  
     break;  
     case ABS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);  
     break;  
     case NEG:  
     tensor_unary_operation(m_samplesize, left, result, negate<double>());  
     break;  
     case POS:  
     // it doesn't mean anything for delayed.  
     // it will just trigger a deep copy of the lazy object  
     throw DataException("Programmer error - POS not supported for lazy data.");  
     break;  
     case EXP:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);  
     break;  
     case SQRT:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);  
     break;  
     case RECIP:  
     tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));  
     break;  
     case GZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));  
     break;  
     case LZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));  
     break;  
     case GEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));  
     break;  
     case LEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));  
     break;  
// These are actually G_UNARY_P ops, but I don't see a compelling reason to treat them differently  
     case NEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));  
     break;  
     case EZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));  
     break;  
1018    
1019    
1020    const DataTypes::RealVectorType*
1021    DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset) const
1022    {
1023            // we assume that any collapsing has been done before we get here
1024            // since we only have one argument we don't need to think about only
1025            // processing single points.
1026            // we will also know we won't get identity nodes
1027      if (m_readytype!='E')
1028      {
1029      throw DataException("Programmer error - resolveNodeReduction should only be called on expanded Data.");
1030      }
1031      if (m_op==IDENTITY)
1032      {
1033      throw DataException("Programmer error - resolveNodeReduction should not be called on identity nodes.");
1034      }
1035      size_t loffset=0;
1036      const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, loffset);
1037    
1038      roffset=m_samplesize*tid;
1039      unsigned int ndpps=getNumDPPSample();
1040      unsigned int psize=DataTypes::noValues(m_left->getShape());
1041      double* result=&(m_samples[roffset]);
1042      switch (m_op)
1043      {
1044        case MINVAL:
1045            {
1046              for (unsigned int z=0;z<ndpps;++z)
1047              {
1048                FMin op;
1049                *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max());
1050                loffset+=psize;
1051                result++;
1052              }
1053            }
1054            break;
1055        case MAXVAL:
1056            {
1057              for (unsigned int z=0;z<ndpps;++z)
1058              {
1059              FMax op;
1060              *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max()*-1);
1061              loffset+=psize;
1062              result++;
1063              }
1064            }
1065            break;
1066      default:      default:
1067      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNodeReduction can not resolve operator "+opToString(m_op)+".");
1068    }    }
1069    return &(m_samples);    return &(m_samples);
1070  }  }
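MINVAL and MAXVAL fold each datapoint of the child down to a single value, starting from an identity element (a huge positive value for the minimum, its negation for the maximum). A hedged sketch of that per-datapoint reduction with plain arrays rather than the reductionOpVector interface:

// Per-datapoint min/max reduction sketch: each datapoint of psize values
// collapses to one output value.
#include <algorithm>
#include <iostream>
#include <limits>

int main() {
    const int ndpps = 2, psize = 3;              // 2 datapoints, 3 values each
    const double in[ndpps * psize] = {4, 1, 9,  -2, 7, 3};
    double minval[ndpps], maxval[ndpps];

    for (int z = 0; z < ndpps; ++z) {
        double lo = std::numeric_limits<double>::max();    // identity for min
        double hi = -std::numeric_limits<double>::max();   // identity for max
        for (int i = 0; i < psize; ++i) {
            lo = std::min(lo, in[z * psize + i]);
            hi = std::max(hi, in[z * psize + i]);
        }
        minval[z] = lo;
        maxval[z] = hi;
    }
    std::cout << minval[0] << " " << maxval[0] << " "      // 1 9
              << minval[1] << " " << maxval[1] << "\n";     // -2 7
}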
1071    
1072    const DataTypes::RealVectorType*
1073  const DataTypes::ValueType*  DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) const
 DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset)  
1074  {  {
1075      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1076      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1077      // processing single points.          // processing single points.
1078    if (m_readytype!='E')    if (m_readytype!='E')
1079    {    {
1080      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");
# Line 1780  DataLazy::resolveNodeNP1OUT(int tid, int Line 1084  DataLazy::resolveNodeNP1OUT(int tid, int
1084      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");
1085    }    }
1086    size_t subroffset;    size_t subroffset;
1087    const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);    const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1088    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1089    size_t loop=0;    size_t loop=0;
1090    size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;    size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
# Line 1789  DataLazy::resolveNodeNP1OUT(int tid, int Line 1093  DataLazy::resolveNodeNP1OUT(int tid, int
1093    switch (m_op)    switch (m_op)
1094    {    {
1095      case SYM:      case SYM:
1096      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1097      {          {
1098          DataMaths::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);              escript::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1099          subroffset+=step;              subroffset+=step;
1100          offset+=step;              offset+=step;
1101      }          }
1102      break;          break;
1103      case NSYM:      case NSYM:
1104      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1105      {          {
1106          DataMaths::nonsymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);              escript::antisymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1107          subroffset+=step;              subroffset+=step;
1108          offset+=step;              offset+=step;
1109      }          }
1110      break;          break;
1111      default:      default:
1112      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
1113    }    }
1114    return &m_samples;    return &m_samples;
1115  }  }
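SYM and NSYM compute the symmetric part (A + A^T)/2 and the antisymmetric part (A - A^T)/2 of each datapoint. A minimal sketch for a square rank-2 datapoint in row-major order (the escript routines operate on vector slices and general shapes):

// Symmetric / antisymmetric split of a 2x2 datapoint.
#include <iostream>

int main() {
    const int n = 2;
    const double a[n * n] = {1.0, 2.0, 3.0, 4.0};
    double sym[n * n], nsym[n * n];
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j) {
            sym[i * n + j]  = 0.5 * (a[i * n + j] + a[j * n + i]);
            nsym[i * n + j] = 0.5 * (a[i * n + j] - a[j * n + i]);
        }
    for (double d : sym)  std::cout << d << " ";   // 1 2.5 2.5 4
    std::cout << "\n";
    for (double d : nsym) std::cout << d << " ";   // 0 -0.5 0.5 0
    std::cout << "\n";
}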
1116    
1117  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1118  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset) const
1119  {  {
1120      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1121      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1122      // processing single points.          // processing single points.
1123    if (m_readytype!='E')    if (m_readytype!='E')
1124    {    {
1125      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");
# Line 1826  DataLazy::resolveNodeNP1OUT_P(int tid, i Line 1130  DataLazy::resolveNodeNP1OUT_P(int tid, i
1130    }    }
1131    size_t subroffset;    size_t subroffset;
1132    size_t offset;    size_t offset;
1133    const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);    const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1134    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1135    offset=roffset;    offset=roffset;
1136    size_t loop=0;    size_t loop=0;
# Line 1836  DataLazy::resolveNodeNP1OUT_P(int tid, i Line 1140  DataLazy::resolveNodeNP1OUT_P(int tid, i
1140    switch (m_op)    switch (m_op)
1141    {    {
1142      case TRACE:      case TRACE:
1143      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1144      {          {
1145              DataMaths::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);              escript::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);
1146          subroffset+=instep;              subroffset+=instep;
1147          offset+=outstep;              offset+=outstep;
1148      }          }
1149      break;          break;
1150      case TRANS:      case TRANS:
1151      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1152      {          {
1153              DataMaths::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);              escript::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);
1154          subroffset+=instep;              subroffset+=instep;
1155          offset+=outstep;              offset+=outstep;
1156      }          }
1157      break;          break;
1158      default:      default:
1159      throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
1160    }    }
1161    return &m_samples;    return &m_samples;
1162  }  }
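TRACE sums matching index pairs and TRANS permutes the indices of each datapoint. A sketch for a rank-2 datapoint, ignoring m_axis_offset for simplicity (the real routines handle higher ranks and the axis offset):

// Trace and transpose of a 3x3 datapoint in row-major order.
#include <iostream>

int main() {
    const int n = 3;
    const double a[n * n] = {1, 2, 3,  4, 5, 6,  7, 8, 9};
    double tr = 0.0;
    double at[n * n];
    for (int i = 0; i < n; ++i) {
        tr += a[i * n + i];                       // TRACE: sum of the diagonal
        for (int j = 0; j < n; ++j)
            at[j * n + i] = a[i * n + j];         // TRANS: swap the two indices
    }
    std::cout << "trace=" << tr << "\n";          // 15
    for (double d : at) std::cout << d << " ";    // 1 4 7 2 5 8 3 6 9
    std::cout << "\n";
}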
1163    
1164    
1165  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1166  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset) const
1167  {  {
1168    if (m_readytype!='E')    if (m_readytype!='E')
1169    {    {
# Line 1871  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1175  DataLazy::resolveNodeNP1OUT_2P(int tid,
1175    }    }
1176    size_t subroffset;    size_t subroffset;
1177    size_t offset;    size_t offset;
1178    const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);    const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1179    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1180    offset=roffset;    offset=roffset;
1181    size_t loop=0;    size_t loop=0;
# Line 1881  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1185  DataLazy::resolveNodeNP1OUT_2P(int tid,
1185    switch (m_op)    switch (m_op)
1186    {    {
1187      case SWAP:      case SWAP:
1188      for (loop=0;loop<numsteps;++loop)          for (loop=0;loop<numsteps;++loop)
1189      {          {
1190              DataMaths::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);              escript::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);
1191          subroffset+=instep;              subroffset+=instep;
1192          offset+=outstep;              offset+=outstep;
1193      }          }
1194      break;          break;
1195      default:      default:
1196      throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");
1197    }    }
1198    return &m_samples;    return &m_samples;
1199  }  }
1200    
1201    const DataTypes::RealVectorType*
1202    DataLazy::resolveNodeCondEval(int tid, int sampleNo, size_t& roffset) const
1203    {
1204      if (m_readytype!='E')
1205      {
1206        throw DataException("Programmer error - resolveNodeCondEval should only be called on expanded Data.");
1207      }
1208      if (m_op!=CONDEVAL)
1209      {
1210        throw DataException("Programmer error - resolveNodeCondEval should only be called on CONDEVAL nodes.");
1211      }
1212      size_t subroffset;
1213    
1214      const RealVectorType* maskres=m_mask->resolveNodeSample(tid, sampleNo, subroffset);
1215      const RealVectorType* srcres=0;
1216      if ((*maskres)[subroffset]>0)
1217      {
1218            srcres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1219      }
1220      else
1221      {
1222            srcres=m_right->resolveNodeSample(tid, sampleNo, subroffset);
1223      }
1224    
1225      // Now we need to copy the result
1226    
1227      roffset=m_samplesize*tid;
1228      for (int i=0;i<m_samplesize;++i)
1229      {
1230            m_samples[roffset+i]=(*srcres)[subroffset+i];  
1231      }
1232    
1233      return &m_samples;
1234    }
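CONDEVAL inspects the first mask value of the sample and copies the corresponding operand's sample into the output buffer. A tiny sketch of that selection:

// Per-sample conditional evaluation sketch.
#include <iostream>

int main() {
    const int samplesize = 3;
    const double mask[1]  = {1.0};                 // >0 selects the left branch
    const double left[samplesize]  = {1, 2, 3};
    const double right[samplesize] = {9, 8, 7};
    double out[samplesize];

    const double* src = (mask[0] > 0) ? left : right;
    for (int i = 0; i < samplesize; ++i)
        out[i] = src[i];                           // copy the chosen sample
    for (double d : out) std::cout << d << " ";    // 1 2 3
    std::cout << "\n";
}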
1235    
1236  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1237  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
# Line 1905  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1242  DataLazy::resolveNodeNP1OUT_2P(int tid,
1242  // There is an additional complication when scalar operations are considered.  // There is an additional complication when scalar operations are considered.
1243  // For example, 2+Vector.  // For example, 2+Vector.
1244  // In this case each double within the point is treated individually  // In this case each double within the point is treated individually
1245  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1246  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset) const
1247  {  {
1248  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1249    
1250    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1251      // first work out which of the children are expanded          // first work out which of the children are expanded
1252    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1253    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1254    if (!leftExp && !rightExp)    if (!leftExp && !rightExp)
1255    {    {
1256      throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");          throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
1257    }    }
1258    bool leftScalar=(m_left->getRank()==0);    bool leftScalar=(m_left->getRank()==0);
1259    bool rightScalar=(m_right->getRank()==0);    bool rightScalar=(m_right->getRank()==0);
1260    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1261    {    {
1262      throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");          throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1263    }    }
1264    size_t leftsize=m_left->getNoValues();    size_t leftsize=m_left->getNoValues();
1265    size_t rightsize=m_right->getNoValues();    size_t rightsize=m_right->getNoValues();
1266    size_t chunksize=1;           // how many doubles will be processed in one go    size_t chunksize=1;                   // how many doubles will be processed in one go
1267    int leftstep=0;       // how far should the left offset advance after each step    int leftstep=0;               // how far should the left offset advance after each step
1268    int rightstep=0;    int rightstep=0;
1269    int numsteps=0;       // total number of steps for the inner loop    int numsteps=0;               // total number of steps for the inner loop
1270    int oleftstep=0;  // the o variables refer to the outer loop    int oleftstep=0;      // the o variables refer to the outer loop
1271    int orightstep=0; // The outer loop is only required in cases where there is an extended scalar    int orightstep=0;     // The outer loop is only required in cases where there is an extended scalar
1272    int onumsteps=1;    int onumsteps=1;
1273        
1274    bool LES=(leftExp && leftScalar); // Left is an expanded scalar    bool LES=(leftExp && leftScalar);     // Left is an expanded scalar
1275    bool RES=(rightExp && rightScalar);    bool RES=(rightExp && rightScalar);
1276    bool LS=(!leftExp && leftScalar); // left is a single scalar    bool LS=(!leftExp && leftScalar);     // left is a single scalar
1277    bool RS=(!rightExp && rightScalar);    bool RS=(!rightExp && rightScalar);
1278    bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar    bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1279    bool RN=(!rightExp && !rightScalar);    bool RN=(!rightExp && !rightScalar);
1280    bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar    bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1281    bool REN=(rightExp && !rightScalar);    bool REN=(rightExp && !rightScalar);
1282    
1283    if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars    if ((LES && RES) || (LEN && REN))     // both are Expanded scalars or both are expanded non-scalars
1284    {    {
1285      chunksize=m_left->getNumDPPSample()*leftsize;          chunksize=m_left->getNumDPPSample()*leftsize;
1286      leftstep=0;          leftstep=0;
1287      rightstep=0;          rightstep=0;
1288      numsteps=1;          numsteps=1;
1289    }    }
1290    else if (LES || RES)    else if (LES || RES)
1291    {    {
1292      chunksize=1;          chunksize=1;
1293      if (LES)        // left is an expanded scalar          if (LES)                // left is an expanded scalar
1294      {          {
1295          if (RS)                  if (RS)
1296          {                  {
1297             leftstep=1;                     leftstep=1;
1298             rightstep=0;                     rightstep=0;
1299             numsteps=m_left->getNumDPPSample();                     numsteps=m_left->getNumDPPSample();
1300          }                  }
1301          else        // RN or REN                  else            // RN or REN
1302          {                  {
1303             leftstep=0;                     leftstep=0;
1304             oleftstep=1;                     oleftstep=1;
1305             rightstep=1;                     rightstep=1;
1306             orightstep=(RN ? -(int)rightsize : 0);                     orightstep=(RN ? -(int)rightsize : 0);
1307             numsteps=rightsize;                     numsteps=rightsize;
1308             onumsteps=m_left->getNumDPPSample();                     onumsteps=m_left->getNumDPPSample();
1309          }                  }
1310      }          }
1311      else        // right is an expanded scalar          else            // right is an expanded scalar
1312      {          {
1313          if (LS)                  if (LS)
1314          {                  {
1315             rightstep=1;                     rightstep=1;
1316             leftstep=0;                     leftstep=0;
1317             numsteps=m_right->getNumDPPSample();                     numsteps=m_right->getNumDPPSample();
1318          }                  }
1319          else                  else
1320          {                  {
1321             rightstep=0;                     rightstep=0;
1322             orightstep=1;                     orightstep=1;
1323             leftstep=1;                     leftstep=1;
1324             oleftstep=(LN ? -(int)leftsize : 0);                     oleftstep=(LN ? -(int)leftsize : 0);
1325             numsteps=leftsize;                     numsteps=leftsize;
1326             onumsteps=m_right->getNumDPPSample();                     onumsteps=m_right->getNumDPPSample();
1327          }                  }
1328      }          }
1329    }    }
1330    else  // this leaves (LEN, RS), (LEN, RN) and their transposes    else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1331    {    {
1332      if (LEN)    // and Right will be a single value          if (LEN)        // and Right will be a single value
1333      {          {
1334          chunksize=rightsize;                  chunksize=rightsize;
1335          leftstep=rightsize;                  leftstep=rightsize;
1336          rightstep=0;                  rightstep=0;
1337          numsteps=m_left->getNumDPPSample();                  numsteps=m_left->getNumDPPSample();
1338          if (RS)                  if (RS)
1339          {                  {
1340             numsteps*=leftsize;                     numsteps*=leftsize;
1341          }                  }
1342      }          }
1343      else    // REN          else    // REN
1344      {          {
1345          chunksize=leftsize;                  chunksize=leftsize;
1346          rightstep=leftsize;                  rightstep=leftsize;
1347          leftstep=0;                  leftstep=0;
1348          numsteps=m_right->getNumDPPSample();                  numsteps=m_right->getNumDPPSample();
1349          if (LS)                  if (LS)
1350          {                  {
1351             numsteps*=rightsize;                     numsteps*=rightsize;
1352          }                  }
1353      }          }
1354    }    }
1355    
1356    int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0    int resultStep=max(leftstep,rightstep);       // only one (at most) should be !=0
1357      // Get the values of sub-expressions          // Get the values of sub-expressions
1358    const ValueType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);      const RealVectorType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);      
1359    const ValueType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);    const RealVectorType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);
1360  LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)  LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1361  LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)  LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1362  LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)  LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
# Line 2033  LAZYDEBUG(cout << "Right res["<< rroffse Line 1370  LAZYDEBUG(cout << "Right res["<< rroffse
1370    
1371    
1372    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1373    double* resultp=&(m_samples[roffset]);        // results are stored at the vector offset we recieved    double* resultp=&(m_samples[roffset]);                // results are stored at the vector offset we received
1374    switch(m_op)    switch(m_op)
1375    {    {
1376      case ADD:      case ADD:
1377          PROC_OP(NO_ARG,plus<double>());          //PROC_OP(NO_ARG,plus<double>());
1378      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1379                 &(*left)[0],
1380                 &(*right)[0],
1381                 chunksize,
1382                 onumsteps,
1383                 numsteps,
1384                 resultStep,
1385                 leftstep,
1386                 rightstep,
1387                 oleftstep,
1388                 orightstep,
1389                 lroffset,
1390                 rroffset,
1391                 escript::ES_optype::ADD);  
1392            break;
1393      case SUB:      case SUB:
1394      PROC_OP(NO_ARG,minus<double>());        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1395      break;               &(*left)[0],
1396                 &(*right)[0],
1397                 chunksize,
1398                 onumsteps,
1399                 numsteps,
1400                 resultStep,
1401                 leftstep,
1402                 rightstep,
1403                 oleftstep,
1404                 orightstep,
1405                 lroffset,
1406                 rroffset,
1407                 escript::ES_optype::SUB);        
1408            //PROC_OP(NO_ARG,minus<double>());
1409            break;
1410      case MUL:      case MUL:
1411      PROC_OP(NO_ARG,multiplies<double>());          //PROC_OP(NO_ARG,multiplies<double>());
1412      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1413                 &(*left)[0],
1414                 &(*right)[0],
1415                 chunksize,
1416                 onumsteps,
1417                 numsteps,
1418                 resultStep,
1419                 leftstep,
1420                 rightstep,
1421                 oleftstep,
1422                 orightstep,
1423                 lroffset,
1424                 rroffset,
1425                 escript::ES_optype::MUL);        
1426            break;
1427      case DIV:      case DIV:
1428      PROC_OP(NO_ARG,divides<double>());          //PROC_OP(NO_ARG,divides<double>());
1429      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1430                 &(*left)[0],
1431                 &(*right)[0],
1432                 chunksize,
1433                 onumsteps,
1434                 numsteps,
1435                 resultStep,
1436                 leftstep,
1437                 rightstep,
1438                 oleftstep,
1439                 orightstep,
1440                 lroffset,
1441                 rroffset,
1442                 escript::ES_optype::DIV);        
1443            break;
1444      case POW:      case POW:
1445         PROC_OP(double (double,double),::pow);         //PROC_OP(double (double,double),::pow);
1446      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1447                 &(*left)[0],
1448                 &(*right)[0],
1449                 chunksize,
1450                 onumsteps,
1451                 numsteps,
1452                 resultStep,
1453                 leftstep,
1454                 rightstep,
1455                 oleftstep,
1456                 orightstep,
1457                 lroffset,
1458                 rroffset,
1459                 escript::ES_optype::POW);        
1460            break;
1461      default:      default:
1462      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
1463    }    }
1464  LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)  LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)
1465    return &m_samples;    return &m_samples;
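// Editorial sketch (not part of either revision): every call to
// escript::binaryOpVectorLazyHelper above passes the same stepping parameters
// (chunksize, numsteps/onumsteps and the various *step values).  The loop
// below is only a hedged illustration of how such a strided binary operation
// could be structured; the semantics are inferred from the parameter names,
// not from the real body of binaryOpVectorLazyHelper in DataVectorOps.
template <typename Op>
void stridedBinaryOpSketch(double* res, const double* left, const double* right,
                           size_t chunksize, size_t onumsteps, size_t numsteps,
                           int resultStep, int leftstep, int rightstep,
                           int oleftstep, int orightstep,
                           size_t lroffset, size_t rroffset, Op op)
{
    for (size_t outer=0; outer<onumsteps; ++outer)        // outer repetition
    {
        for (size_t step=0; step<numsteps; ++step)        // inner repetition
        {
            for (size_t i=0; i<chunksize; ++i)            // one contiguous chunk of values
            {
                res[i]=op(left[lroffset+i], right[rroffset+i]);
            }
            res+=resultStep;                              // result always advances
            lroffset+=leftstep;                           // at most one of these is non-zero
            rroffset+=rightstep;
        }
        lroffset+=oleftstep;                              // rewind/advance for the outer loop
        rroffset+=orightstep;
    }
}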
# Line 2062  LAZYDEBUG(cout << "Result res[" << roffs Line 1469  LAZYDEBUG(cout << "Result res[" << roffs
1469  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1470  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1471  // unlike the other resolve helpers, we must treat these datapoints separately.  // unlike the other resolve helpers, we must treat these datapoints separately.
1472  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1473  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset) const
1474  {  {
1475  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1476    
1477    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1478      // first work out which of the children are expanded          // first work out which of the children are expanded
1479    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1480    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1481    int steps=getNumDPPSample();    int steps=getNumDPPSample();
1482    int leftStep=(leftExp? m_left->getNoValues() : 0);        // do not have scalars as input to this method    int leftStep=(leftExp? m_left->getNoValues() : 0);            // do not have scalars as input to this method
1483    int rightStep=(rightExp?m_right->getNoValues() : 0);    int rightStep=(rightExp?m_right->getNoValues() : 0);
1484    
1485    int resultStep=getNoValues();    int resultStep=getNoValues();
1486    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1487    size_t offset=roffset;    size_t offset=roffset;
1488    
1489    const ValueType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);    const RealVectorType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);
1490    
1491    const ValueType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);    const RealVectorType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);
1492    
1493  LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;  LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;
1494  cout << getNoValues() << endl;)  cout << getNoValues() << endl;)
# Line 2095  LAZYDEBUG(cout << "m_samplesize=" << m_s Line 1502  LAZYDEBUG(cout << "m_samplesize=" << m_s
1502  LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)  LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1503  LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)  LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)
1504    
1505    double* resultp=&(m_samples[offset]);     // results are stored at the vector offset we recieved    double* resultp=&(m_samples[offset]);         // results are stored at the vector offset we received
1506    switch(m_op)    switch(m_op)
1507    {    {
1508      case PROD:      case PROD:
1509      for (int i=0;i<steps;++i,resultp+=resultStep)          for (int i=0;i<steps;++i,resultp+=resultStep)
1510      {          {
1511            const double *ptr_0 = &((*left)[lroffset]);            const double *ptr_0 = &((*left)[lroffset]);
1512            const double *ptr_1 = &((*right)[rroffset]);            const double *ptr_1 = &((*right)[rroffset]);
1513    
1514  LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)  LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
1515  LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)  LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)
1516    
1517            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1518    
1519        lroffset+=leftStep;            lroffset+=leftStep;
1520        rroffset+=rightStep;            rroffset+=rightStep;
1521      }          }
1522      break;          break;
1523      default:      default:
1524      throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1525    }    }
1526    roffset=offset;    roffset=offset;
1527    return &m_samples;    return &m_samples;
1528  }  }
 #endif //LAZY_NODE_STORAGE  
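// Editorial sketch (not from either revision): the PROD loop above relies on a
// simple convention -- a child that is not expanded gets a step of 0, so its
// single block of values is reused for every data point, while the expanded
// child and the result advance by a full block each iteration.  "tensorOp" is
// a stand-in for matrix_matrix_product; the real call also takes shape and
// transpose arguments.
inline void perSampleProductSketch(const double* left, const double* right, double* res,
                                   int steps, int leftStep, int rightStep, int resultStep,
                                   void (*tensorOp)(const double*, const double*, double*))
{
    size_t lro=0, rro=0;
    for (int i=0; i<steps; ++i, res+=resultStep)
    {
        tensorOp(&left[lro], &right[rro], res);   // one tensor product per data point
        lro+=leftStep;                            // 0 => reuse the non-expanded operand
        rro+=rightStep;
    }
}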
   
 /*  
   \brief Compute the value of the expression for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
 */  
 // the vector and the offset are a place where the method could write its data if it wishes  
 // it is not obligated to do so. For example, if it has its own storage already, it can use that.  
 // Hence the return value to indicate where the data is actually stored.  
 // Regardless, the storage should be assumed to be used, even if it isn't.  
   
 // the roffset is the offset within the returned vector where the data begins  
 const DataTypes::ValueType*  
 DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  
 {  
 LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)  
     // collapse so we have a 'E' node or an IDENTITY for some other type  
   if (m_readytype!='E' && m_op!=IDENTITY)  
   {  
     collapse();  
   }  
   if (m_op==IDENTITY)    
   {  
     const ValueType& vec=m_id->getVectorRO();  
     if (m_readytype=='C')  
     {  
     roffset=0;  
 LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)  
     return &(vec);  
     }  
     roffset=m_id->getPointOffset(sampleNo, 0);  
 LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)  
     return &(vec);  
   }  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer Error - Collapse did not produce an expanded node.");  
   }  
   switch (getOpgroup(m_op))  
   {  
   case G_UNARY:  
   case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);  
   case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);  
   case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);  
   case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);  
   case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);  
   case G_NP1OUT_2P: return resolveNP1OUT_2P(v, offset, sampleNo, roffset);  
   default:  
     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");  
   }  
1529    
 }  
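// Editorial sketch (not from either revision): the comment block above
// describes the ownership/offset contract for resolving a sample.  The
// fragment below shows how a caller might honour that contract against the
// newer resolveSample(int, size_t&) signature that follows, assuming the
// method is accessible to the caller; 'lazy' and 'sampleNo' are illustrative
// names only.  The returned vector is borrowed storage: read it, never free it.
double sumOneSampleSketch(const escript::DataLazy& lazy, int sampleNo)
{
    size_t roffset = 0;   // set by the callee to where this sample begins
    const escript::DataTypes::RealVectorType* res = lazy.resolveSample(sampleNo, roffset);
    size_t n = lazy.getNumDPPSample() * lazy.getNoValues();   // == m_samplesize
    double total = 0;
    for (size_t i = 0; i < n; ++i)
    {
        total += (*res)[roffset + i];   // read-only access into borrowed storage
    }
    return total;
}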
1530    
1531  const DataTypes::ValueType*  const DataTypes::RealVectorType*
1532  DataLazy::resolveSample(BufferGroup& bg, int sampleNo, size_t& roffset)  DataLazy::resolveSample(int sampleNo, size_t& roffset) const
1533  {  {
1534  #ifdef _OPENMP  #ifdef _OPENMP
1535      int tid=omp_get_thread_num();          int tid=omp_get_thread_num();
1536  #else  #else
1537      int tid=0;          int tid=0;
1538  #endif  #endif
1539      return resolveSample(bg.getBuffer(tid),bg.getOffset(tid),sampleNo,roffset);  
1540    #ifdef LAZY_STACK_PROF
1541            stackstart[tid]=&tid;
1542            stackend[tid]=&tid;
1543            const DataTypes::RealVectorType* r=resolveNodeSample(tid, sampleNo, roffset);
1544            size_t d=(size_t)stackstart[tid]-(size_t)stackend[tid];
1545            #pragma omp critical
1546            if (d>maxstackuse)
1547            {
1548    cout << "Max resolve Stack use " << d << endl;
1549                    maxstackuse=d;
1550            }
1551            return r;
1552    #else
1553            return resolveNodeSample(tid, sampleNo, roffset);
1554    #endif
1555  }  }
1556    
1557    
# Line 2195  void Line 1560  void
1560  DataLazy::resolveToIdentity()  DataLazy::resolveToIdentity()
1561  {  {
1562     if (m_op==IDENTITY)     if (m_op==IDENTITY)
1563      return;          return;
 #ifndef LAZY_NODE_STORAGE  
    DataReady_ptr p=resolveVectorWorker();  
 #else  
1564     DataReady_ptr p=resolveNodeWorker();     DataReady_ptr p=resolveNodeWorker();
 #endif  
1565     makeIdentity(p);     makeIdentity(p);
1566  }  }
1567    
# Line 2216  void DataLazy::makeIdentity(const DataRe Line 1577  void DataLazy::makeIdentity(const DataRe
1577     else if(p->isExpanded()) {m_readytype='E';}     else if(p->isExpanded()) {m_readytype='E';}
1578     else if (p->isTagged()) {m_readytype='T';}     else if (p->isTagged()) {m_readytype='T';}
1579     else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}     else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}
    m_buffsRequired=1;  
1580     m_samplesize=p->getNumDPPSample()*p->getNoValues();     m_samplesize=p->getNumDPPSample()*p->getNoValues();
    m_maxsamplesize=m_samplesize;  
1581     m_left.reset();     m_left.reset();
1582     m_right.reset();     m_right.reset();
1583  }  }
# Line 2231  DataLazy::resolve() Line 1590  DataLazy::resolve()
1590      return m_id;      return m_id;
1591  }  }
1592    
 #ifdef LAZY_NODE_STORAGE  
1593    
1594  // This version of resolve uses storage in each node to hold results  /* This is really a static method but I think that caused problems in windows */
1595  DataReady_ptr  void
1596  DataLazy::resolveNodeWorker()  DataLazy::resolveGroupWorker(std::vector<DataLazy*>& dats)
1597  {  {
1598    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally    if (dats.empty())
1599    {    {
1600      collapse();          return;
1601    }    }
1602    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.    vector<DataLazy*> work;
1603    {    FunctionSpace fs=dats[0]->getFunctionSpace();
1604      return m_id;    bool match=true;
1605    }    for (int i=dats.size()-1;i>=0;--i)
1606      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'    {
1607    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));          if (dats[i]->m_readytype!='E')
1608    ValueType& resvec=result->getVectorRW();          {
1609    DataReady_ptr resptr=DataReady_ptr(result);                  dats[i]->collapse();
1610            }
1611    int sample;          if (dats[i]->m_op!=IDENTITY)
1612    int totalsamples=getNumSamples();          {
1613    const ValueType* res=0;   // Storage for answer                  work.push_back(dats[i]);
1614  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)                  if (fs!=dats[i]->getFunctionSpace())
1615    #pragma omp parallel for private(sample,res) schedule(static)                  {
1616    for (sample=0;sample<totalsamples;++sample)                          match=false;
1617    {                  }
1618      size_t roffset=0;          }
1619      }
1620      if (work.empty())
1621      {
1622            return;         // no work to do
1623      }
1624      if (match)    // all functionspaces match.  Yes I realise this is overly strict
1625      {             // it is possible that dats[0] is one of the objects which we discarded and
1626                    // all the other functionspaces match.
1627            vector<DataExpanded*> dep;
1628            vector<RealVectorType*> vecs;
1629            for (int i=0;i<work.size();++i)
1630            {
1631                    dep.push_back(new DataExpanded(fs,work[i]->getShape(), RealVectorType(work[i]->getNoValues())));
1632                    vecs.push_back(&(dep[i]->getVectorRW()));
1633            }
1634            int totalsamples=work[0]->getNumSamples();
1635            const RealVectorType* res=0; // Storage for answer
1636            int sample;
1637            #pragma omp parallel private(sample, res)
1638            {
1639                size_t roffset=0;
1640                #pragma omp for schedule(static)
1641                for (sample=0;sample<totalsamples;++sample)
1642                {
1643                    roffset=0;
1644                    int j;
1645                    for (j=work.size()-1;j>=0;--j)
1646                    {
1647  #ifdef _OPENMP  #ifdef _OPENMP
1648      res=resolveNodeSample(omp_get_thread_num(),sample,roffset);                      res=work[j]->resolveNodeSample(omp_get_thread_num(),sample,roffset);
1649  #else  #else
1650      res=resolveNodeSample(0,sample,roffset);                      res=work[j]->resolveNodeSample(0,sample,roffset);
1651  #endif  #endif
1652  LAZYDEBUG(cout << "Sample #" << sample << endl;)                      RealVectorType::size_type outoffset=dep[j]->getPointOffset(sample,0);
1653  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )                      memcpy(&((*vecs[j])[outoffset]),&((*res)[roffset]),work[j]->m_samplesize*sizeof(RealVectorType::ElementType));
1654      DataVector::size_type outoffset=result->getPointOffset(sample,0);                  }
1655      memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(DataVector::ElementType));              }
1656            }
1657            // Now we need to load the new results as identity ops into the lazy nodes
1658            for (int i=work.size()-1;i>=0;--i)
1659            {
1660                work[i]->makeIdentity(REFCOUNTNS::dynamic_pointer_cast<DataReady>(dep[i]->getPtr()));
1661            }
1662      }
1663      else  // functionspaces do not match
1664      {
1665            for (int i=0;i<work.size();++i)
1666            {
1667                    work[i]->resolveToIdentity();
1668            }
1669    }    }
   return resptr;  
1670  }  }
1671    
 #endif // LAZY_NODE_STORAGE  
1672    
1673  // To simplify the memory management, all threads operate on one large vector, rather than one each.  
1674  // Each sample is evaluated independently and copied into the result DataExpanded.  // This version of resolve uses storage in each node to hold results
1675  DataReady_ptr  DataReady_ptr
1676  DataLazy::resolveVectorWorker()  DataLazy::resolveNodeWorker()
1677  {  {
1678      if (m_readytype!='E')         // if the whole sub-expression is Constant or Tagged, then evaluate it normally
 LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)  
   if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally  
1679    {    {
1680      collapse();      collapse();
1681    }    }
1682    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.    if (m_op==IDENTITY)           // So a lazy expression of Constant or Tagged data will be returned here.
1683    {    {
1684      return m_id;      return m_id;
1685    }    }
1686      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'          // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1687    size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  RealVectorType(getNoValues()));
1688      // storage to evaluate its expression    RealVectorType& resvec=result->getVectorRW();
   int numthreads=1;  
 #ifdef _OPENMP  
   numthreads=omp_get_max_threads();  
 #endif  
   ValueType v(numthreads*threadbuffersize);  
 LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)  
   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));  
   ValueType& resvec=result->getVectorRW();  
1689    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
1690    
1691    int sample;    int sample;
   size_t outoffset;     // offset in the output data  
1692    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1693    const ValueType* res=0;   // Vector storing the answer    const RealVectorType* res=0;       // Storage for answer
   size_t resoffset=0;       // where in the vector to find the answer  
1694  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1695    #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)    #pragma omp parallel private(sample,res)
   for (sample=0;sample<totalsamples;++sample)  
1696    {    {
1697  LAZYDEBUG(cout << "################################# " << sample << endl;)          size_t roffset=0;
1698    #ifdef LAZY_STACK_PROF
1699            stackstart[omp_get_thread_num()]=&roffset;
1700            stackend[omp_get_thread_num()]=&roffset;
1701    #endif
1702            #pragma omp for schedule(static)
1703            for (sample=0;sample<totalsamples;++sample)
1704            {
1705                    roffset=0;
1706  #ifdef _OPENMP  #ifdef _OPENMP
1707      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);                  res=resolveNodeSample(omp_get_thread_num(),sample,roffset);
1708  #else  #else
1709      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.                  res=resolveNodeSample(0,sample,roffset);
1710  #endif  #endif
1711  LAZYDEBUG(cerr << "-------------------------------- " << endl;)  LAZYDEBUG(cout << "Sample #" << sample << endl;)
1712  LAZYDEBUG(cerr<< "Copying sample#" << sample << endl;)  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )
1713      outoffset=result->getPointOffset(sample,0);                  RealVectorType::size_type outoffset=result->getPointOffset(sample,0);
1714  LAZYDEBUG(cerr << "offset=" << outoffset << " from offset=" << resoffset << " " << m_samplesize << " doubles" << endl;)                  memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(RealVectorType::ElementType));
1715      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector          }
1716      {    }
1717  LAZYDEBUG(cerr << "outoffset=" << outoffset << " resoffset=" << resoffset << " " << (*res)[resoffset]<< endl;)  #ifdef LAZY_STACK_PROF
1718      resvec[outoffset]=(*res)[resoffset];    for (int i=0;i<getNumberOfThreads();++i)
1719      }    {
1720  LAZYDEBUG(cerr << DataTypes::pointToString(resvec,getShape(),outoffset-m_samplesize+DataTypes::noValues(getShape()),"Final result:") << endl;)          size_t r=((size_t)stackstart[i] - (size_t)stackend[i]);
1721  LAZYDEBUG(cerr << "*********************************" << endl;)  //      cout << i << " " << stackstart[i] << " .. " << stackend[i] << " = " <<  r << endl;
1722            if (r>maxstackuse)
1723            {
1724                    maxstackuse=r;
1725            }
1726    }    }
1727      cout << "Max resolve Stack use=" << maxstackuse << endl;
1728    #endif
1729    return resptr;    return resptr;
1730  }  }
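// Editorial sketch (not from either revision): resolveNodeWorker above uses a
// per-thread buffering pattern -- each node keeps one sample-sized slice of
// scratch space per OpenMP thread (offset m_samplesize*tid), so concurrent
// sample resolutions never share scratch, and each finished sample is copied
// into the output vector at its own point offset.  The names below are
// illustrative only and the per-sample evaluation is replaced by a stand-in.
#include <omp.h>
#include <vector>
#include <cstring>

void resolveAllSamplesSketch(int totalsamples, size_t samplesize,
                             std::vector<double>& nodeScratch,   // numthreads * samplesize doubles
                             std::vector<double>& output)        // totalsamples * samplesize doubles
{
    #pragma omp parallel for schedule(static)
    for (int sample=0; sample<totalsamples; ++sample)
    {
        int tid = omp_get_thread_num();
        double* slice = &nodeScratch[samplesize*(size_t)tid];    // this thread's private slice
        for (size_t i=0; i<samplesize; ++i)
        {
            slice[i] = sample + 0.5*i;       // stand-in for the real per-sample evaluation
        }
        std::memcpy(&output[samplesize*(size_t)sample], slice, samplesize*sizeof(double));
    }
}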
1731    
# Line 2335  std::string Line 1733  std::string
1733  DataLazy::toString() const  DataLazy::toString() const
1734  {  {
1735    ostringstream oss;    ostringstream oss;
1736    oss << "Lazy Data:";    oss << "Lazy Data: [depth=" << m_height<< "] ";
1737    intoString(oss);    switch (escriptParams.getLazyStrFmt())
1738      {
1739      case 1:       // tree format
1740            oss << endl;
1741            intoTreeString(oss,"");
1742            break;
1743      case 2:       // just the depth
1744            break;
1745      default:
1746            intoString(oss);
1747            break;
1748      }
1749    return oss.str();    return oss.str();
1750  }  }
1751    
# Line 2348  DataLazy::intoString(ostringstream& oss) Line 1757  DataLazy::intoString(ostringstream& oss)
1757    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1758    {    {
1759    case G_IDENTITY:    case G_IDENTITY:
1760      if (m_id->isExpanded())          if (m_id->isExpanded())
1761      {          {
1762         oss << "E";             oss << "E";
1763      }          }
1764      else if (m_id->isTagged())          else if (m_id->isTagged())
1765      {          {
1766        oss << "T";            oss << "T";
1767      }          }
1768      else if (m_id->isConstant())          else if (m_id->isConstant())
1769      {          {
1770        oss << "C";            oss << "C";
1771      }          }
1772      else          else
1773      {          {
1774        oss << "?";            oss << "?";
1775      }          }
1776      oss << '@' << m_id.get();          oss << '@' << m_id.get();
1777      break;          break;
1778    case G_BINARY:    case G_BINARY:
1779      oss << '(';          oss << '(';
1780      m_left->intoString(oss);          m_left->intoString(oss);
1781      oss << ' ' << opToString(m_op) << ' ';          oss << ' ' << opToString(m_op) << ' ';
1782      m_right->intoString(oss);          m_right->intoString(oss);
1783      oss << ')';          oss << ')';
1784      break;          break;
1785    case G_UNARY:    case G_UNARY:
1786    case G_UNARY_P:    case G_UNARY_P:
1787    case G_NP1OUT:    case G_NP1OUT:
1788    case G_NP1OUT_P:    case G_NP1OUT_P:
1789      oss << opToString(m_op) << '(';    case G_REDUCTION:
1790      m_left->intoString(oss);          oss << opToString(m_op) << '(';
1791      oss << ')';          m_left->intoString(oss);
1792      break;          oss << ')';
1793            break;
1794    case G_TENSORPROD:    case G_TENSORPROD:
1795      oss << opToString(m_op) << '(';          oss << opToString(m_op) << '(';
1796      m_left->intoString(oss);          m_left->intoString(oss);
1797      oss << ", ";          oss << ", ";
1798      m_right->intoString(oss);          m_right->intoString(oss);
1799      oss << ')';          oss << ')';
1800      break;          break;
1801    case G_NP1OUT_2P:    case G_NP1OUT_2P:
1802      oss << opToString(m_op) << '(';          oss << opToString(m_op) << '(';
1803      m_left->intoString(oss);          m_left->intoString(oss);
1804      oss << ", " << m_axis_offset << ", " << m_transpose;          oss << ", " << m_axis_offset << ", " << m_transpose;
1805      oss << ')';          oss << ')';
1806      break;          break;
1807      case G_CONDEVAL:
1808            oss << opToString(m_op)<< '(' ;
1809            m_mask->intoString(oss);
1810            oss << " ? ";
1811            m_left->intoString(oss);
1812            oss << " : ";
1813            m_right->intoString(oss);
1814            oss << ')';
1815            break;
1816    default:    default:
1817      oss << "UNKNOWN";          oss << "UNKNOWN";
1818    }    }
1819  }  }
1820    
1821    
1822    void
1823    DataLazy::intoTreeString(ostringstream& oss, string indent) const
1824    {
1825      oss << '[' << m_rank << ':' << setw(3) << m_samplesize << "] " << indent;
1826      switch (getOpgroup(m_op))
1827      {
1828      case G_IDENTITY:
1829            if (m_id->isExpanded())
1830            {
1831               oss << "E";
1832            }
1833            else if (m_id->isTagged())
1834            {
1835              oss << "T";
1836            }
1837            else if (m_id->isConstant())
1838            {
1839              oss << "C";
1840            }
1841            else
1842            {
1843              oss << "?";
1844            }
1845            oss << '@' << m_id.get() << endl;
1846            break;
1847      case G_BINARY:
1848            oss << opToString(m_op) << endl;
1849            indent+='.';
1850            m_left->intoTreeString(oss, indent);
1851            m_right->intoTreeString(oss, indent);
1852            break;
1853      case G_UNARY:
1854      case G_UNARY_P:
1855      case G_NP1OUT:
1856      case G_NP1OUT_P:
1857      case G_REDUCTION:
1858            oss << opToString(m_op) << endl;
1859            indent+='.';
1860            m_left->intoTreeString(oss, indent);
1861            break;
1862      case G_TENSORPROD:
1863            oss << opToString(m_op) << endl;
1864            indent+='.';
1865            m_left->intoTreeString(oss, indent);
1866            m_right->intoTreeString(oss, indent);
1867            break;
1868      case G_NP1OUT_2P:
1869            oss << opToString(m_op) << ", " << m_axis_offset << ", " << m_transpose<< endl;
1870            indent+='.';
1871            m_left->intoTreeString(oss, indent);
1872            break;
1873      default:
1874            oss << "UNKNOWN";
1875      }
1876    }
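// Editorial illustration (not actual program output): with the tree format
// selected (getLazyStrFmt()==1), toString() prints the depth line and then one
// intoTreeString() line per node, indented with dots.  For an expression like
// sin(C)+E it might render roughly as below; the depth, ranks, sample sizes,
// operator spellings (from opToString) and addresses are made up for the
// example and depend on the actual data.
//
//   Lazy Data: [depth=2]
//   [0:  1] +
//   [0:  1] .sin
//   [0:  1] ..C@0x7f3a10
//   [0:  1] .E@0x7f3a58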
1877    
1878    
1879  DataAbstract*  DataAbstract*
1880  DataLazy::deepCopy()  DataLazy::deepCopy() const
1881  {  {
1882    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1883    {    {
1884    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());
1885    case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);    case G_UNARY:
1886    case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);    case G_REDUCTION:      return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
1887      case G_UNARY_P:       return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_tol);
1888      case G_BINARY:        return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
1889    case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);    case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
1890    case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);    case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
1891      case G_NP1OUT_P:   return new DataLazy(m_left->deepCopy()->getPtr(),m_op,  m_axis_offset);
1892      case G_NP1OUT_2P:  return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
1893    default:    default:
1894      throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");          throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
1895    }    }
1896  }  }
1897    
1898    // For this, we don't care what op you were doing because the answer is now zero
1899    DataAbstract*
1900    DataLazy::zeroedCopy() const
1901    {
1902      return new DataLazy(m_id->zeroedCopy()->getPtr());
1903    }
1904    
1905  // There is no single, natural interpretation of getLength on DataLazy.  // There is no single, natural interpretation of getLength on DataLazy.
1906  // Instances of DataReady can look at the size of their vectors.  // Instances of DataReady can look at the size of their vectors.
# Line 2421  DataLazy::deepCopy() Line 1908  DataLazy::deepCopy()
1908  // or it could be some function of the lengths of the DataReady instances which  // or it could be some function of the lengths of the DataReady instances which
1909  // form part of the expression.  // form part of the expression.
1910  // Rather than have people making assumptions, I have disabled the method.  // Rather than have people making assumptions, I have disabled the method.
1911  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
1912  DataLazy::getLength() const  DataLazy::getLength() const
1913  {  {
1914    throw DataException("getLength() does not make sense for lazy data.");    throw DataException("getLength() does not make sense for lazy data.");
# Line 2436  DataLazy::getSlice(const DataTypes::Regi Line 1923  DataLazy::getSlice(const DataTypes::Regi
1923    
1924    
1925  // To do this we need to rely on our child nodes  // To do this we need to rely on our child nodes
1926  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
1927  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
1928                   int dataPointNo)                   int dataPointNo)
1929  {  {
1930    if (m_op==IDENTITY)    if (m_op==IDENTITY)
1931    {    {
1932      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
1933    }    }
1934    if (m_readytype!='E')    if (m_readytype!='E')
1935    {    {
1936      collapse();          collapse();
1937      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
1938    }    }
1939    // at this point we do not have an identity node and the expression will be Expanded    // at this point we do not have an identity node and the expression will be Expanded
1940    // so we only need to know which child to ask    // so we only need to know which child to ask
1941    if (m_left->m_readytype=='E')    if (m_left->m_readytype=='E')
1942    {    {
1943      return m_left->getPointOffset(sampleNo,dataPointNo);          return m_left->getPointOffset(sampleNo,dataPointNo);
1944    }    }
1945    else    else
1946    {    {
1947      return m_right->getPointOffset(sampleNo,dataPointNo);          return m_right->getPointOffset(sampleNo,dataPointNo);
1948    }    }
1949  }  }
1950    
1951  // To do this we need to rely on our child nodes  // To do this we need to rely on our child nodes
1952  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
1953  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
1954                   int dataPointNo) const                   int dataPointNo) const
1955  {  {
1956    if (m_op==IDENTITY)    if (m_op==IDENTITY)
1957    {    {
1958      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
1959    }    }
1960    if (m_readytype=='E')    if (m_readytype=='E')
1961    {    {
# Line 2476  DataLazy::getPointOffset(int sampleNo, Line 1963  DataLazy::getPointOffset(int sampleNo,
1963      // so we only need to know which child to ask      // so we only need to know which child to ask
1964      if (m_left->m_readytype=='E')      if (m_left->m_readytype=='E')
1965      {      {
1966      return m_left->getPointOffset(sampleNo,dataPointNo);          return m_left->getPointOffset(sampleNo,dataPointNo);
1967      }      }
1968      else      else
1969      {      {
1970      return m_right->getPointOffset(sampleNo,dataPointNo);          return m_right->getPointOffset(sampleNo,dataPointNo);
1971      }      }
1972    }    }
1973    if (m_readytype=='C')    if (m_readytype=='C')
1974    {    {
1975      return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter          return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
1976    }    }
1977    throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");    throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
1978  }  }
# Line 2495  DataLazy::getPointOffset(int sampleNo, Line 1982  DataLazy::getPointOffset(int sampleNo,
1982  void  void
1983  DataLazy::setToZero()  DataLazy::setToZero()
1984  {  {
1985  //   DataTypes::ValueType v(getNoValues(),0);  //   DataTypes::RealVectorType v(getNoValues(),0);
1986  //   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));  //   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
1987  //   m_op=IDENTITY;  //   m_op=IDENTITY;
1988  //   m_right.reset();    //   m_right.reset();  
# Line 2503  DataLazy::setToZero() Line 1990  DataLazy::setToZero()
1990  //   m_readytype='C';  //   m_readytype='C';
1991  //   m_buffsRequired=1;  //   m_buffsRequired=1;
1992    
1993    privdebug=privdebug;  // to stop the compiler complaining about unused privdebug    (void)privdebug;  // to stop the compiler complaining about unused privdebug
1994    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");
1995  }  }
1996    
1997  bool  bool
1998  DataLazy::actsExpanded() const  DataLazy::actsExpanded() const
1999  {  {
2000      return (m_readytype=='E');          return (m_readytype=='E');
2001  }  }
2002    
2003  }   // end namespace  } // end namespace
2004    
