/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


trunk/escript/src/DataLazy.cpp revision 2147 by jfenwick, Wed Dec 10 04:41:26 2008 UTC vs. trunk/escriptcore/src/DataLazy.cpp revision 6144 by caltinay, Wed Apr 6 05:25:13 2016 UTC
# Line 1  Line 1 
1    
2  /*******************************************************  /*****************************************************************************
3  *  *
4  * Copyright (c) 2003-2008 by University of Queensland  * Copyright (c) 2003-2016 by The University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * http://www.uq.edu.au
 * http://www.uq.edu.au/esscc  
6  *  *
7  * Primary Business: Queensland, Australia  * Primary Business: Queensland, Australia
8  * Licensed under the Open Software License version 3.0  * Licensed under the Apache License, version 2.0
9  * http://www.opensource.org/licenses/osl-3.0.php  * http://www.apache.org/licenses/LICENSE-2.0
10  *  *
11  *******************************************************/  * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12    * Development 2012-2013 by School of Earth Sciences
13    * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14    *
15    *****************************************************************************/
16    
17  #include "DataLazy.h"  #include "DataLazy.h"
 #ifdef USE_NETCDF  
 #include <netcdfcpp.h>  
 #endif  
 #ifdef PASO_MPI  
 #include <mpi.h>  
 #endif  
 #ifdef _OPENMP  
 #include <omp.h>  
 #endif  
 #include "FunctionSpace.h"  
 #include "DataTypes.h"  
18  #include "Data.h"  #include "Data.h"
19  #include "UnaryFuncs.h"     // for escript::fsign  #include "DataTypes.h"
20    #include "EscriptParams.h"
21    #include "FunctionSpace.h"
22  #include "Utils.h"  #include "Utils.h"
23    #include "DataVectorOps.h"
24    
25    #include <iomanip> // for some fancy formatting in debug
26    
27  // #define LAZYDEBUG(X) X;  using namespace escript::DataTypes;
28    
29    #define NO_ARG
30    
31    // #define LAZYDEBUG(X) if (privdebug){X;}
32  #define LAZYDEBUG(X)  #define LAZYDEBUG(X)
33    namespace
34    {
35    bool privdebug=false;
36    
37    #define ENABLEDEBUG privdebug=true;
38    #define DISABLEDEBUG privdebug=false;
39    }
40    
41    // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}
42    
43    // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "SIZE LIMIT EXCEEDED " << m_height << endl;resolveToIdentity();}
44    
45    
46    #define SIZELIMIT if (m_height>escript::escriptParams.getTOO_MANY_LEVELS())  {if (escript::escriptParams.getLAZY_VERBOSE()){cerr << "SIZE LIMIT EXCEEDED height=" << m_height << endl;}resolveToIdentity();}
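As used in the constructors further down, the active SIZELIMIT macro is expanded immediately after LazyNodeSetup() at the end of each non-identity constructor, so a sufficiently deep chain of lazy operations resolves itself; a minimal sketch of that constructor tail:

   LazyNodeSetup();
   SIZELIMIT   // resolveToIdentity() once m_height > escriptParams.getTOO_MANY_LEVELS()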
47    
48  /*  /*
49  How does DataLazy work?  How does DataLazy work?
# Line 42  A special operation, IDENTITY, stores an Line 56  A special operation, IDENTITY, stores an
56  This means that all "internal" nodes in the structure are instances of DataLazy.  This means that all "internal" nodes in the structure are instances of DataLazy.
57    
58  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
59  Note that IDENITY is not considered a unary operation.  Note that IDENTITY is not considered a unary operation.
60    
61  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
62  It must however form a DAG (directed acyclic graph).  It must however form a DAG (directed acyclic graph).
# Line 50  I will refer to individual DataLazy obje Line 64  I will refer to individual DataLazy obje
64    
65  Each node also stores:  Each node also stores:
66  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
67      evaluated.          evaluated.
68  - m_buffsrequired ~ the larged number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
69      evaluate the expression.          evaluate the expression.
70  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
71    
72  When a new node is created, the above values are computed based on the values in the child nodes.  When a new node is created, the above values are computed based on the values in the child nodes.
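A minimal sketch (assumed usage, not code from this file) of how such a DAG arises from ordinary Data arithmetic:

   // Data a = ...;       // some ready (non-lazy) data, wrapped in an IDENTITY node
   // Data b = a + a;     // one BINARY '+' node; both children are the same IDENTITY
   //                     // node, so the structure is a DAG rather than a tree
   // Data c = b.sin();   // a UNARY 'sin' node whose only child is the '+' node
   // At each step m_readytype and m_samplesize (and m_children/m_height in the newer
   // revision) are computed from the child nodes, as described above.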
# Line 93  namespace escript Line 107  namespace escript
107  namespace  namespace
108  {  {
109    
110    
111    // enabling this will print out when ever the maximum stacksize used by resolve increases
112    // it assumes _OPENMP is also in use
113    //#define LAZY_STACK_PROF
114    
115    
116    
117    #ifndef _OPENMP
118      #ifdef LAZY_STACK_PROF
119      #undef LAZY_STACK_PROF
120      #endif
121    #endif
122    
123    
124    #ifdef LAZY_STACK_PROF
125    std::vector<void*> stackstart(getNumberOfThreads());
126    std::vector<void*> stackend(getNumberOfThreads());
127    size_t maxstackuse=0;
128    #endif
129    
130  enum ES_opgroup  enum ES_opgroup
131  {  {
132     G_UNKNOWN,     G_UNKNOWN,
133     G_IDENTITY,     G_IDENTITY,
134     G_BINARY,        // pointwise operations with two arguments     G_BINARY,            // pointwise operations with two arguments
135     G_UNARY,     // pointwise operations with one argument     G_UNARY,             // pointwise operations with one argument
136     G_UNARY_P,       // pointwise operations with one argument, requiring a parameter     G_UNARY_P,           // pointwise operations with one argument, requiring a parameter
137     G_NP1OUT,        // non-pointwise op with one output     G_NP1OUT,            // non-pointwise op with one output
138     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter     G_NP1OUT_P,          // non-pointwise op with one output requiring a parameter
139     G_TENSORPROD     // general tensor product     G_TENSORPROD,        // general tensor product
140       G_NP1OUT_2P,         // non-pointwise op with one output requiring two params
141       G_REDUCTION,         // non-pointwise unary op with a scalar output
142       G_CONDEVAL
143  };  };
144    
145    
146    
147    
148  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",
149              "sin","cos","tan",                          "sin","cos","tan",
150              "asin","acos","atan","sinh","cosh","tanh","erf",                          "asin","acos","atan","sinh","cosh","tanh","erf",
151              "asinh","acosh","atanh",                          "asinh","acosh","atanh",
152              "log10","log","sign","abs","neg","pos","exp","sqrt",                          "log10","log","sign","abs","neg","pos","exp","sqrt",
153              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",                          "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",
154              "symmetric","nonsymmetric",                          "symmetric","antisymmetric",
155              "prod",                          "prod",
156              "transpose", "trace"};                          "transpose", "trace",
157  int ES_opcount=40;                          "swapaxes",
158                            "minval", "maxval",
159                            "condEval",
160                            "hermitian","antihermitian"
161    };
162    int ES_opcount=46;
163  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
164              G_UNARY,G_UNARY,G_UNARY, //10                          G_UNARY,G_UNARY,G_UNARY, //10
165              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 17
166              G_UNARY,G_UNARY,G_UNARY,                    // 20                          G_UNARY,G_UNARY,G_UNARY,                                        // 20
167              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 28                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28
168              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,          // 35
169              G_NP1OUT,G_NP1OUT,                          G_NP1OUT,G_NP1OUT,
170              G_TENSORPROD,                          G_TENSORPROD,
171              G_NP1OUT_P, G_NP1OUT_P};                          G_NP1OUT_P, G_NP1OUT_P,
172                            G_NP1OUT_2P,
173                            G_REDUCTION, G_REDUCTION,
174                            G_CONDEVAL,
175                            G_UNARY,G_UNARY
176    };
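// Note: ES_opstrings and opgroups are parallel arrays - opgroups[i] classifies the
// operator named ES_opstrings[i]; the trailing //10, //17, ... comments track the
// running entry count, and ES_opcount (46 in revision 6144) must match the length of
// both arrays whenever an operator is added.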
177  inline  inline
178  ES_opgroup  ES_opgroup
179  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 138  getOpgroup(ES_optype op) Line 185  getOpgroup(ES_optype op)
185  FunctionSpace  FunctionSpace
186  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
187  {  {
188      // perhaps this should call interpolate and throw or something?          // perhaps this should call interpolate and throw or something?
189      // maybe we need an interpolate node -          // maybe we need an interpolate node -
190      // that way, if interpolate is required in any other op we can just throw a          // that way, if interpolate is required in any other op we can just throw a
191      // programming error exception.          // programming error exception.
192    
193    FunctionSpace l=left->getFunctionSpace();    FunctionSpace l=left->getFunctionSpace();
194    FunctionSpace r=right->getFunctionSpace();    FunctionSpace r=right->getFunctionSpace();
195    if (l!=r)    if (l!=r)
196    {    {
197      if (r.probeInterpolation(l))      signed char res=r.getDomain()->preferredInterpolationOnDomain(r.getTypeCode(), l.getTypeCode());
198        if (res==1)
199      {      {
200      return l;          return l;
201      }      }
202      if (l.probeInterpolation(r))      if (res==-1)
203      {      {
204      return r;          return r;
205      }      }
206      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
207    }    }
# Line 165  resultFS(DataAbstract_ptr left, DataAbst Line 213  resultFS(DataAbstract_ptr left, DataAbst
213  DataTypes::ShapeType  DataTypes::ShapeType
214  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
215  {  {
216      if (left->getShape()!=right->getShape())          if (left->getShape()!=right->getShape())
217      {          {
218        if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))            if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
219        {            {
220        throw DataException("Shapes not the same - shapes must match for (point)binary operations.");                  throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
221        }            }
222        if (left->getRank()==0)   // we need to allow scalar * anything  
223        {            if (left->getRank()==0)       // we need to allow scalar * anything
224          return right->getShape();            {
225        }                  return right->getShape();
226        if (right->getRank()==0)            }
227        {            if (right->getRank()==0)
228          return left->getShape();            {
229        }                  return left->getShape();
230        throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");            }
231      }            throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
232      return left->getShape();          }
233            return left->getShape();
234  }  }
235    
236  // return the shape for "op left"  // return the shape for "op left"
237    
238  DataTypes::ShapeType  DataTypes::ShapeType
239  resultShape(DataAbstract_ptr left, ES_optype op)  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)
240    {
241            switch(op)
242            {
243            case TRANS:
244               {                    // for the scoping of variables
245                    const DataTypes::ShapeType& s=left->getShape();
246                    DataTypes::ShapeType sh;
247                    int rank=left->getRank();
248                    if (axis_offset<0 || axis_offset>rank)
249                    {
250                stringstream e;
251                e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
252                throw DataException(e.str());
253            }
254            for (int i=0; i<rank; i++)
255                    {
256                       int index = (axis_offset+i)%rank;
257               sh.push_back(s[index]); // Append to new shape
258            }
259                    return sh;
260               }
261            break;
262            case TRACE:
263               {
264                    int rank=left->getRank();
265                    if (rank<2)
266                    {
267                       throw DataException("Trace can only be computed for objects with rank 2 or greater.");
268                    }
269                    if ((axis_offset>rank-2) || (axis_offset<0))
270                    {
271                       throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");
272                    }
273                    if (rank==2)
274                    {
275                       return DataTypes::scalarShape;
276                    }
277                    else if (rank==3)
278                    {
279                       DataTypes::ShapeType sh;
280                       if (axis_offset==0)
281                       {
282                            sh.push_back(left->getShape()[2]);
283                       }
284                       else         // offset==1
285                       {
286                            sh.push_back(left->getShape()[0]);
287                       }
288                       return sh;
289                    }
290                    else if (rank==4)
291                    {
292                       DataTypes::ShapeType sh;
293                       const DataTypes::ShapeType& s=left->getShape();
294                       if (axis_offset==0)
295                       {
296                            sh.push_back(s[2]);
297                            sh.push_back(s[3]);
298                       }
299                       else if (axis_offset==1)
300                       {
301                            sh.push_back(s[0]);
302                            sh.push_back(s[3]);
303                       }
304                       else         // offset==2
305                       {
306                            sh.push_back(s[0]);
307                            sh.push_back(s[1]);
308                       }
309                       return sh;
310                    }
311                    else            // unknown rank
312                    {
313                       throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
314                    }
315               }
316            break;
317            default:
318            throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
319            }
320    }
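A worked shape example (added for illustration, following the code above): for a left argument of shape (2,3,4),

   // TRANS with axis_offset=1: sh[i]=s[(1+i)%3]   ->  result shape (3,4,2)
   // TRACE with rank 3, axis_offset=0:            ->  result shape (4)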
321    
322    DataTypes::ShapeType
323    SwapShape(DataAbstract_ptr left, const int axis0, const int axis1)
324  {  {
325      switch(op)       // This code taken from the Data.cpp swapaxes() method
326      {       // Some of the checks are probably redundant here
327          case TRANS:       int axis0_tmp,axis1_tmp;
328          return left->getShape();       const DataTypes::ShapeType& s=left->getShape();
329      break;       DataTypes::ShapeType out_shape;
330      case TRACE:       // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
331          return DataTypes::scalarShape;       // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
332      break;       int rank=left->getRank();
333          default:       if (rank<2) {
334      throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
335      }       }
336         if (axis0<0 || axis0>rank-1) {
337            stringstream e;
338            e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
339            throw DataException(e.str());
340         }
341         if (axis1<0 || axis1>rank-1) {
342            stringstream e;
343            e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
344            throw DataException(e.str());
345         }
346         if (axis0 == axis1) {
347             throw DataException("Error - Data::swapaxes: axis indices must be different.");
348         }
349         if (axis0 > axis1) {
350             axis0_tmp=axis1;
351             axis1_tmp=axis0;
352         } else {
353             axis0_tmp=axis0;
354             axis1_tmp=axis1;
355         }
356         for (int i=0; i<rank; i++) {
357           if (i == axis0_tmp) {
358              out_shape.push_back(s[axis1_tmp]);
359           } else if (i == axis1_tmp) {
360              out_shape.push_back(s[axis0_tmp]);
361           } else {
362              out_shape.push_back(s[i]);
363           }
364         }
365        return out_shape;
366  }  }
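A worked shape example (added for illustration): for a left argument of shape (2,3,4),

   // SwapShape(left, 0, 2) exchanges the extents of axes 0 and 2  ->  (4,3,2);
   // every axis other than axis0 and axis1 keeps its original extent.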
367    
368    
369  // determine the output shape for the general tensor product operation  // determine the output shape for the general tensor product operation
370  // the additional parameters return information required later for the product  // the additional parameters return information required later for the product
371  // the majority of this code is copy pasted from C_General_Tensor_Product  // the majority of this code is copy pasted from C_General_Tensor_Product
372  DataTypes::ShapeType  DataTypes::ShapeType
373  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
374  {  {
375                
376    // Get rank and shape of inputs    // Get rank and shape of inputs
377    int rank0 = left->getRank();    int rank0 = left->getRank();
378    int rank1 = right->getRank();    int rank1 = right->getRank();
# Line 217  GTPShape(DataAbstract_ptr left, DataAbst Line 381  GTPShape(DataAbstract_ptr left, DataAbst
381    
382    // Prepare for the loops of the product and verify compatibility of shapes    // Prepare for the loops of the product and verify compatibility of shapes
383    int start0=0, start1=0;    int start0=0, start1=0;
384    if (transpose == 0)       {}    if (transpose == 0)           {}
385    else if (transpose == 1)  { start0 = axis_offset; }    else if (transpose == 1)      { start0 = axis_offset; }
386    else if (transpose == 2)  { start1 = rank1-axis_offset; }    else if (transpose == 2)      { start1 = rank1-axis_offset; }
387    else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }    else                          { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
388    
389    if (rank0<axis_offset)    if (rank0<axis_offset)
390    {    {
391      throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");          throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
392    }    }
393    
394    // Adjust the shapes for transpose    // Adjust the shapes for transpose
395    DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather    DataTypes::ShapeType tmpShape0(rank0);        // pre-sizing the vectors rather
396    DataTypes::ShapeType tmpShape1(rank1);    // than using push_back    DataTypes::ShapeType tmpShape1(rank1);        // than using push_back
397    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
398    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }
399    
400    // Prepare for the loops of the product    // Prepare for the loops of the product
401    SL=1, SM=1, SR=1;    SL=1, SM=1, SR=1;
402    for (int i=0; i<rank0-axis_offset; i++)   {    for (int i=0; i<rank0-axis_offset; i++)       {
403      SL *= tmpShape0[i];      SL *= tmpShape0[i];
404    }    }
405    for (int i=rank0-axis_offset; i<rank0; i++)   {    for (int i=rank0-axis_offset; i<rank0; i++)   {
406      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
407        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
408      }      }
409      SM *= tmpShape0[i];      SM *= tmpShape0[i];
410    }    }
411    for (int i=axis_offset; i<rank1; i++)     {    for (int i=axis_offset; i<rank1; i++)         {
412      SR *= tmpShape1[i];      SR *= tmpShape1[i];
413    }    }
414    
415    // Define the shape of the output (rank of shape is the sum of the loop ranges below)    // Define the shape of the output (rank of shape is the sum of the loop ranges below)
416    DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      
417    {         // block to limit the scope of out_index    {                     // block to limit the scope of out_index
418       int out_index=0;       int out_index=0;
419       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
420       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
# Line 266  GTPShape(DataAbstract_ptr left, DataAbst Line 430  GTPShape(DataAbstract_ptr left, DataAbst
430    return shape2;    return shape2;
431  }  }
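A worked example (added for illustration): the ordinary matrix product corresponds to

   // left shape (3,4), right shape (4,5), axis_offset=1, transpose=0
   //   -> SL=3, SM=4, SR=5 and output shape (3,5);
   // the SM loop is also where the contracted dimensions are checked for compatibility.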
432    
433    }       // end anonymous namespace
 // determine the number of points in the result of "left op right"  
 // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here  
 // size_t  
 // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  
 // {  
 //    switch (getOpgroup(op))  
 //    {  
 //    case G_BINARY: return left->getLength();  
 //    case G_UNARY: return left->getLength();  
 //    case G_NP1OUT: return left->getLength();  
 //    default:  
 //  throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");  
 //    }  
 // }  
   
 // determine the number of samples requires to evaluate an expression combining left and right  
 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  
 // The same goes for G_TENSORPROD  
 int  
 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  
 {  
    switch(getOpgroup(op))  
    {  
    case G_IDENTITY: return 1;  
    case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_UNARY:  
    case G_UNARY_P: return max(left->getBuffsRequired(),1);  
    case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);  
    case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);  
    case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    default:  
     throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");  
    }  
 }  
   
   
 }   // end anonymous namespace  
434    
435    
436    
# Line 318  opToString(ES_optype op) Line 445  opToString(ES_optype op)
445    return ES_opstrings[op];    return ES_opstrings[op];
446  }  }
447    
448    void DataLazy::LazyNodeSetup()
449    {
450    #ifdef _OPENMP
451        int numthreads=omp_get_max_threads();
452        m_samples.resize(numthreads*m_samplesize);
453        m_sampleids=new int[numthreads];
454        for (int i=0;i<numthreads;++i)
455        {
456            m_sampleids[i]=-1;  
457        }
458    #else
459        m_samples.resize(m_samplesize);
460        m_sampleids=new int[1];
461        m_sampleids[0]=-1;
462    #endif  // _OPENMP
463    }
464    
465    
466    // Creates an identity node
467  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
468      : parent(p->getFunctionSpace(),p->getShape()),          : parent(p->getFunctionSpace(),p->getShape())
469      m_op(IDENTITY),          ,m_sampleids(0),
470      m_axis_offset(0),          m_samples(1)
     m_transpose(0),  
     m_SL(0), m_SM(0), m_SR(0)  
471  {  {
472     if (p->isLazy())     if (p->isLazy())
473     {     {
474      // I don't want identity of Lazy.          // I don't want identity of Lazy.
475      // Question: Why would that be so bad?          // Question: Why would that be so bad?
476      // Answer: We assume that the child of ID is something we can call getVector on          // Answer: We assume that the child of ID is something we can call getVector on
477      throw DataException("Programmer error - attempt to create identity from a DataLazy.");          throw DataException("Programmer error - attempt to create identity from a DataLazy.");
478     }     }
479     else     else
480     {     {
481      m_id=dynamic_pointer_cast<DataReady>(p);          DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);
482      if(p->isConstant()) {m_readytype='C';}          makeIdentity(dr);
483      else if(p->isExpanded()) {m_readytype='E';}  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)
     else if (p->isTagged()) {m_readytype='T';}  
     else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}  
484     }     }
    m_buffsRequired=1;  
    m_samplesize=getNumDPPSample()*getNoValues();  
    m_maxsamplesize=m_samplesize;  
485  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
486  }  }
487    
   
   
   
488  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
489      : parent(left->getFunctionSpace(),left->getShape()),          : parent(left->getFunctionSpace(),(getOpgroup(op)!=G_REDUCTION)?left->getShape():DataTypes::scalarShape),
490      m_op(op),          m_op(op),
491      m_axis_offset(0),          m_axis_offset(0),
492      m_transpose(0),          m_transpose(0),
493      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
494  {  {
495     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT) && (getOpgroup(op)!=G_REDUCTION))
496     {     {
497      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
498     }     }
499    
500     DataLazy_ptr lleft;     DataLazy_ptr lleft;
501     if (!left->isLazy())     if (!left->isLazy())
502     {     {
503      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
504     }     }
505     else     else
506     {     {
507      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
508     }     }
509     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
510     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
511     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
512     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
513       m_height=m_left->m_height+1;
514       LazyNodeSetup();
515       SIZELIMIT
516  }  }
517    
518    
519  // In this constructor we need to consider interpolation  // In this constructor we need to consider interpolation
520  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
521      : parent(resultFS(left,right,op), resultShape(left,right,op)),          : parent(resultFS(left,right,op), resultShape(left,right,op)),
522      m_op(op),          m_op(op),
523      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
524  {  {
525    LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)
526     if ((getOpgroup(op)!=G_BINARY))     if ((getOpgroup(op)!=G_BINARY))
527     {     {
528      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
529     }     }
530    
531     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
532     {     {
533      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
534      Data ltemp(left);          Data ltemp(left);
535      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
536      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
537     }     }
538     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
539     {     {
540      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
541      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
542    LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)
543     }     }
544     left->operandCheck(*right);     left->operandCheck(*right);
545    
546     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
547     {     {
548      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
549    LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)
550     }     }
551     else     else
552     {     {
553      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
554    LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)
555     }     }
556     if (right->isLazy())     if (right->isLazy())
557     {     {
558      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
559    LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)
560     }     }
561     else     else
562     {     {
563      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
564    LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)
565     }     }
566     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
567     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
568     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
569     {     {
570      m_readytype='E';          m_readytype='E';
571     }     }
572     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
573     {     {
574      m_readytype='T';          m_readytype='T';
575     }     }
576     else     else
577     {     {
578      m_readytype='C';          m_readytype='C';
579     }     }
580     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
581     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_children=m_left->m_children+m_right->m_children+2;
582     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_height=max(m_left->m_height,m_right->m_height)+1;
583       LazyNodeSetup();
584       SIZELIMIT
585  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
586  }  }
587    
588  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
589      : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),          : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
590      m_op(op),          m_op(op),
591      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
592      m_transpose(transpose)          m_transpose(transpose)
593  {  {
594     if ((getOpgroup(op)!=G_TENSORPROD))     if ((getOpgroup(op)!=G_TENSORPROD))
595     {     {
596      throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
597     }     }
598     if ((transpose>2) || (transpose<0))     if ((transpose>2) || (transpose<0))
599     {     {
600      throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");          throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
601     }     }
602     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
603     {     {
604      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
605      Data ltemp(left);          Data ltemp(left);
606      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
607      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
608     }     }
609     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
610     {     {
611      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
612      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
613     }     }
614     left->operandCheck(*right);  //    left->operandCheck(*right);
615    
616     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
617     {     {
618      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
619     }     }
620     else     else
621     {     {
622      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
623     }     }
624     if (right->isLazy())     if (right->isLazy())
625     {     {
626      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
627     }     }
628     else     else
629     {     {
630      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
631     }     }
632     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
633     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
634     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
635     {     {
636      m_readytype='E';          m_readytype='E';
637     }     }
638     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
639     {     {
640      m_readytype='T';          m_readytype='T';
641     }     }
642     else     else
643     {     {
644      m_readytype='C';          m_readytype='C';
645     }     }
646     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
647     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_children=m_left->m_children+m_right->m_children+2;
648     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_height=max(m_left->m_height,m_right->m_height)+1;
649       LazyNodeSetup();
650       SIZELIMIT
651  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
652  }  }
653    
654    
655  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
656      : parent(left->getFunctionSpace(), resultShape(left,op)),          : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),
657      m_op(op),          m_op(op),
658      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
659      m_transpose(0),          m_transpose(0),
660      m_tol(0)          m_tol(0)
661  {  {
662     if ((getOpgroup(op)!=G_NP1OUT_P))     if ((getOpgroup(op)!=G_NP1OUT_P))
663     {     {
664      throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
665     }     }
666     DataLazy_ptr lleft;     DataLazy_ptr lleft;
667     if (!left->isLazy())     if (!left->isLazy())
668     {     {
669      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
670     }     }
671     else     else
672     {     {
673      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
674     }     }
675     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
676     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
677     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
678     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
679       m_height=m_left->m_height+1;
680       LazyNodeSetup();
681       SIZELIMIT
682  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
683  }  }
684    
685  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)
686      : parent(left->getFunctionSpace(), left->getShape()),          : parent(left->getFunctionSpace(), left->getShape()),
687      m_op(op),          m_op(op),
688      m_axis_offset(0),          m_axis_offset(0),
689      m_transpose(0),          m_transpose(0),
690      m_tol(tol)          m_tol(tol)
691  {  {
692     if ((getOpgroup(op)!=G_UNARY_P))     if ((getOpgroup(op)!=G_UNARY_P))
693     {     {
694      throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");
695     }     }
696     DataLazy_ptr lleft;     DataLazy_ptr lleft;
697     if (!left->isLazy())     if (!left->isLazy())
698     {     {
699      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
700     }     }
701     else     else
702     {     {
703      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
704     }     }
705     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
706     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
707     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
708     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
709       m_height=m_left->m_height+1;
710       LazyNodeSetup();
711       SIZELIMIT
712  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
713  }  }
714    
715  DataLazy::~DataLazy()  
716    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)
717            : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),
718            m_op(op),
719            m_axis_offset(axis0),
720            m_transpose(axis1),
721            m_tol(0)
722  {  {
723       if ((getOpgroup(op)!=G_NP1OUT_2P))
724       {
725            throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require two integer parameters.");
726       }
727       DataLazy_ptr lleft;
728       if (!left->isLazy())
729       {
730            lleft=DataLazy_ptr(new DataLazy(left));
731       }
732       else
733       {
734            lleft=dynamic_pointer_cast<DataLazy>(left);
735       }
736       m_readytype=lleft->m_readytype;
737       m_left=lleft;
738       m_samplesize=getNumDPPSample()*getNoValues();
739       m_children=m_left->m_children+1;
740       m_height=m_left->m_height+1;
741       LazyNodeSetup();
742       SIZELIMIT
743    LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)
744  }  }
745    
746    
747  int  namespace
 DataLazy::getBuffsRequired() const  
748  {  {
749      return m_buffsRequired;  
750        inline int max3(int a, int b, int c)
751        {
752            int t=(a>b?a:b);
753            return (t>c?t:c);
754    
755        }
756  }  }
757    
758    DataLazy::DataLazy(DataAbstract_ptr mask, DataAbstract_ptr left, DataAbstract_ptr right/*, double tol*/)
759            : parent(left->getFunctionSpace(), left->getShape()),
760            m_op(CONDEVAL),
761            m_axis_offset(0),
762            m_transpose(0),
763            m_tol(0)
764    {
765    
766  size_t     DataLazy_ptr lmask;
767  DataLazy::getMaxSampleSize() const     DataLazy_ptr lleft;
768       DataLazy_ptr lright;
769       if (!mask->isLazy())
770       {
771            lmask=DataLazy_ptr(new DataLazy(mask));
772       }
773       else
774       {
775            lmask=dynamic_pointer_cast<DataLazy>(mask);
776       }
777       if (!left->isLazy())
778       {
779            lleft=DataLazy_ptr(new DataLazy(left));
780       }
781       else
782       {
783            lleft=dynamic_pointer_cast<DataLazy>(left);
784       }
785       if (!right->isLazy())
786       {
787            lright=DataLazy_ptr(new DataLazy(right));
788       }
789       else
790       {
791            lright=dynamic_pointer_cast<DataLazy>(right);
792       }
793       m_readytype=lmask->m_readytype;
794       if ((lleft->m_readytype!=lright->m_readytype) || (lmask->m_readytype!=lleft->m_readytype))
795       {
796            throw DataException("Programmer Error - condEval arguments must have the same readytype");
797       }
798       m_left=lleft;
799       m_right=lright;
800       m_mask=lmask;
801       m_samplesize=getNumDPPSample()*getNoValues();
802       m_children=m_left->m_children+m_right->m_children+m_mask->m_children+1;
803       m_height=max3(m_left->m_height,m_right->m_height,m_mask->m_height)+1;
804       LazyNodeSetup();
805       SIZELIMIT
806    LAZYDEBUG(cout << "(8)Lazy created with " << m_samplesize << endl;)
807    }
808    
809    
810    
811    DataLazy::~DataLazy()
812  {  {
813      return m_maxsamplesize;     delete[] m_sampleids;
814  }  }
815    
816    
817  /*  /*
818    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
819    This does the work for the collapse method.    This does the work for the collapse method.
820    For reasons of efficiency do not call this method on DataExpanded nodes.    For reasons of efficiency do not call this method on DataExpanded nodes.
821  */  */
822  DataReady_ptr  DataReady_ptr
823  DataLazy::collapseToReady()  DataLazy::collapseToReady() const
824  {  {
825    if (m_readytype=='E')    if (m_readytype=='E')
826    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
827      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
828    }    }
829    if (m_op==IDENTITY)    if (m_op==IDENTITY)
# Line 606  DataLazy::collapseToReady() Line 841  DataLazy::collapseToReady()
841    switch(m_op)    switch(m_op)
842    {    {
843      case ADD:      case ADD:
844      result=left+right;          result=left+right;
845      break;          break;
846      case SUB:            case SUB:          
847      result=left-right;          result=left-right;
848      break;          break;
849      case MUL:            case MUL:          
850      result=left*right;          result=left*right;
851      break;          break;
852      case DIV:            case DIV:          
853      result=left/right;          result=left/right;
854      break;          break;
855      case SIN:      case SIN:
856      result=left.sin();            result=left.sin();      
857      break;          break;
858      case COS:      case COS:
859      result=left.cos();          result=left.cos();
860      break;          break;
861      case TAN:      case TAN:
862      result=left.tan();          result=left.tan();
863      break;          break;
864      case ASIN:      case ASIN:
865      result=left.asin();          result=left.asin();
866      break;          break;
867      case ACOS:      case ACOS:
868      result=left.acos();          result=left.acos();
869      break;          break;
870      case ATAN:      case ATAN:
871      result=left.atan();          result=left.atan();
872      break;          break;
873      case SINH:      case SINH:
874      result=left.sinh();          result=left.sinh();
875      break;          break;
876      case COSH:      case COSH:
877      result=left.cosh();          result=left.cosh();
878      break;          break;
879      case TANH:      case TANH:
880      result=left.tanh();          result=left.tanh();
881      break;          break;
882      case ERF:      case ERF:
883      result=left.erf();          result=left.erf();
884      break;          break;
885     case ASINH:     case ASINH:
886      result=left.asinh();          result=left.asinh();
887      break;          break;
888     case ACOSH:     case ACOSH:
889      result=left.acosh();          result=left.acosh();
890      break;          break;
891     case ATANH:     case ATANH:
892      result=left.atanh();          result=left.atanh();
893      break;          break;
894      case LOG10:      case LOG10:
895      result=left.log10();          result=left.log10();
896      break;          break;
897      case LOG:      case LOG:
898      result=left.log();          result=left.log();
899      break;          break;
900      case SIGN:      case SIGN:
901      result=left.sign();          result=left.sign();
902      break;          break;
903      case ABS:      case ABS:
904      result=left.abs();          result=left.abs();
905      break;          break;
906      case NEG:      case NEG:
907      result=left.neg();          result=left.neg();
908      break;          break;
909      case POS:      case POS:
910      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
911      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
912      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
913      break;          break;
914      case EXP:      case EXP:
915      result=left.exp();          result=left.exp();
916      break;          break;
917      case SQRT:      case SQRT:
918      result=left.sqrt();          result=left.sqrt();
919      break;          break;
920      case RECIP:      case RECIP:
921      result=left.oneOver();          result=left.oneOver();
922      break;          break;
923      case GZ:      case GZ:
924      result=left.wherePositive();          result=left.wherePositive();
925      break;          break;
926      case LZ:      case LZ:
927      result=left.whereNegative();          result=left.whereNegative();
928      break;          break;
929      case GEZ:      case GEZ:
930      result=left.whereNonNegative();          result=left.whereNonNegative();
931      break;          break;
932      case LEZ:      case LEZ:
933      result=left.whereNonPositive();          result=left.whereNonPositive();
934      break;          break;
935      case NEZ:      case NEZ:
936      result=left.whereNonZero(m_tol);          result=left.whereNonZero(m_tol);
937      break;          break;
938      case EZ:      case EZ:
939      result=left.whereZero(m_tol);          result=left.whereZero(m_tol);
940      break;          break;
941      case SYM:      case SYM:
942      result=left.symmetric();          result=left.symmetric();
943      break;          break;
944      case NSYM:      case NSYM:
945      result=left.nonsymmetric();          result=left.antisymmetric();
946      break;          break;
947      case PROD:      case PROD:
948      result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);          result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
949      break;          break;
950      case TRANS:      case TRANS:
951      result=left.transpose(m_axis_offset);          result=left.transpose(m_axis_offset);
952      break;          break;
953      case TRACE:      case TRACE:
954      result=left.trace(m_axis_offset);          result=left.trace(m_axis_offset);
955            break;
956        case SWAP:
957            result=left.swapaxes(m_axis_offset, m_transpose);
958            break;
959        case MINVAL:
960            result=left.minval();
961            break;
962        case MAXVAL:
963          result=left.maxval();
964            break;
965        case HER:
966        result=left.hermitian();
967      break;      break;
968      default:      default:
969      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
970    }    }
971    return result.borrowReadyPtr();    return result.borrowReadyPtr();
972  }  }
# Line 731  DataLazy::collapseToReady() Line 978  DataLazy::collapseToReady()
978     the purpose of using DataLazy in the first place).     the purpose of using DataLazy in the first place).
979  */  */
980  void  void
981  DataLazy::collapse()  DataLazy::collapse() const
982  {  {
983    if (m_op==IDENTITY)    if (m_op==IDENTITY)
984    {    {
985      return;          return;
986    }    }
987    if (m_readytype=='E')    if (m_readytype=='E')
988    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
989      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
990    }    }
991    m_id=collapseToReady();    m_id=collapseToReady();
992    m_op=IDENTITY;    m_op=IDENTITY;
993  }  }
994    
995  /*  // The result will be stored in m_samples
996    \brief Compute the value of the expression (unary operation) for the given sample.  // The return value is a pointer to the DataVector, offset is the offset within the return value
997    \return Vector which stores the value of the subexpression for the given sample.  const DataTypes::RealVectorType*
998    \param v A vector to store intermediate results.  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset) const
999    \param offset Index in v to begin storing results.  {
1000    \param sampleNo Sample number to evaluate.  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
1001    \param roffset (output parameter) the offset in the return vector where the result begins.          // collapse so we have a 'E' node or an IDENTITY for some other type
1002      if (m_readytype!='E' && m_op!=IDENTITY)
1003    The return value will be an existing vector so do not deallocate it.    {
1004    If the result is stored in v it should be stored at the offset given.          collapse();
1005    Everything from offset to the end of v should be considered available for this method to use.    }
1006  */    if (m_op==IDENTITY)  
1007  DataTypes::ValueType*    {
1008  DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const      const RealVectorType& vec=m_id->getVectorRO();
1009        roffset=m_id->getPointOffset(sampleNo, 0);
1010    #ifdef LAZY_STACK_PROF
1011    int x;
1012    if (&x<stackend[omp_get_thread_num()])
1013  {  {
1014      // we assume that any collapsing has been done before we get here         stackend[omp_get_thread_num()]=&x;
1015      // since we only have one argument we don't need to think about only  }
1016      // processing single points.  #endif
1017        return &(vec);
1018      }
1019      if (m_readytype!='E')
1020      {
1021        throw DataException("Programmer Error - Collapse did not produce an expanded node.");
1022      }
1023      if (m_sampleids[tid]==sampleNo)
1024      {
1025            roffset=tid*m_samplesize;
1026            return &(m_samples);            // sample is already resolved
1027      }
1028      m_sampleids[tid]=sampleNo;
1029    
1030      switch (getOpgroup(m_op))
1031      {
1032      case G_UNARY:
1033      case G_UNARY_P: return resolveNodeUnary(tid, sampleNo, roffset);
1034      case G_BINARY: return resolveNodeBinary(tid, sampleNo, roffset);
1035      case G_NP1OUT: return resolveNodeNP1OUT(tid, sampleNo, roffset);
1036      case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);
1037      case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);
1038      case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);
1039      case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);
1040      case G_CONDEVAL: return resolveNodeCondEval(tid, sampleNo, roffset);
1041      default:
1042        throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1043      }
1044    }
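// ---- Editorial sketch (not part of either revision) --------------------------------
// The per-thread caching in resolveNodeSample above can be pictured as one flat buffer
// partitioned into a slice of m_samplesize doubles per thread, plus a record of which
// sample each slice currently holds.  The stand-alone analogue below uses invented
// names (SampleCache, slot, holds) purely for illustration; it is not escript API.
#include <cstddef>
#include <vector>

struct SampleCache
{
    std::size_t samplesize;            // doubles per sample
    std::vector<double> samples;       // numthreads * samplesize, one slice per thread
    std::vector<int> sampleids;        // sample number currently held per thread (-1 = none)

    SampleCache(int numthreads, std::size_t ssize)
      : samplesize(ssize), samples(numthreads*ssize), sampleids(numthreads, -1) {}

    double* slot(int tid) { return &samples[tid*samplesize]; }           // thread-private slice
    bool holds(int tid, int sampleNo) const { return sampleids[tid]==sampleNo; }
};
// ---- end sketch ---------------------------------------------------------------------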
1045    
1046    const DataTypes::RealVectorType*
1047    DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset) const
1048    {
1049            // we assume that any collapsing has been done before we get here
1050            // since we only have one argument we don't need to think about only
1051            // processing single points.
1052            // we will also know we won't get identity nodes
1053    if (m_readytype!='E')    if (m_readytype!='E')
1054    {    {
1055      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
1056    }    }
1057    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);    if (m_op==IDENTITY)
1058    const double* left=&((*vleft)[roffset]);    {
1059    double* result=&(v[offset]);      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");
1060    roffset=offset;    }
1061      const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);
1062      const double* left=&((*leftres)[roffset]);
1063      roffset=m_samplesize*tid;
1064      double* result=&(m_samples[roffset]);
1065      escript::ESFunction operation=SINF;
1066    switch (m_op)    switch (m_op)
1067    {    {
1068      case SIN:        case SIN:
1069      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);      operation=SINF;
1070      break;      break;
1071      case COS:      case COS:
1072      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);          operation=COSF;
1073      break;      break;
1074      case TAN:      case TAN:
1075      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);          operation=TANF;
1076      break;      break;
1077      case ASIN:      case ASIN:
1078      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);          operation=ASINF;
1079      break;      break;
1080      case ACOS:      case ACOS:
1081      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);          operation=ACOSF;
1082      break;      break;
1083      case ATAN:      case ATAN:
1084      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);          operation=ATANF;
1085      break;      break;
1086      case SINH:      case SINH:
1087      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);          operation=SINHF;
1088      break;      break;
1089      case COSH:      case COSH:
1090      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);          operation=COSHF;
1091      break;      break;
1092      case TANH:      case TANH:
1093      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);          operation=TANHF;
1094      break;      break;
1095      case ERF:      case ERF:
1096  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ERFF;
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
1097      break;      break;
 #endif  
1098     case ASINH:     case ASINH:
1099  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ASINHF;
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
1100      break;      break;
1101     case ACOSH:     case ACOSH:
1102  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ACOSHF;
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
1103      break;      break;
1104     case ATANH:     case ATANH:
1105  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ATANHF;
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
1106      break;      break;
1107      case LOG10:      case LOG10:
1108      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);          operation=LOG10F;
1109      break;      break;
1110      case LOG:      case LOG:
1111      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);          operation=LOGF;
1112      break;      break;
1113      case SIGN:      case SIGN:
1114      tensor_unary_operation(m_samplesize, left, result, escript::fsign);          operation=SIGNF;
1115      break;      break;
1116      case ABS:      case ABS:
1117      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);          operation=ABSF;
1118      break;      break;
1119      case NEG:      case NEG:
1120      tensor_unary_operation(m_samplesize, left, result, negate<double>());          operation=NEGF;
1121      break;      break;
1122      case POS:      case POS:
1123      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
1124      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
1125      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
1126      break;          break;
1127      case EXP:      case EXP:
1128      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);          operation=EXPF;
1129      break;      break;
1130      case SQRT:      case SQRT:
1131      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);          operation=SQRTF;
1132      break;      break;
1133      case RECIP:      case RECIP:
1134      tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));          operation=INVF;
1135      break;      break;
1136      case GZ:      case GZ:
1137      tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));          operation=GTZEROF;
1138      break;      break;
1139      case LZ:      case LZ:
1140      tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));          operation=LTZEROF;
1141      break;      break;
1142      case GEZ:      case GEZ:
1143      tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));          operation=GEZEROF;
1144      break;      break;
1145      case LEZ:      case LEZ:
1146      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));          operation=LEZEROF;
1147      break;      break;
1148  // These are actually G_UNARY_P but I don't see a compelling reason to treat them differently  // These are actually G_UNARY_P but I don't see a compelling reason to treat them differently
1149      case NEZ:      case NEZ:
1150      tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));          operation=NEQZEROF;
1151      break;      break;
1152      case EZ:      case EZ:
1153      tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));          operation=EQZEROF;
1154      break;      break;
   
1155      default:      default:
1156      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
1157    }    }
1158    return &v;    tensor_unary_array_operation(m_samplesize,
1159                                 left,
1160                                 result,
1161                                 operation,
1162                                 m_tol);  
1163      return &(m_samples);
1164  }  }
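// ---- Editorial sketch (not part of either revision) --------------------------------
// The switch above only selects an ESFunction code; the elementwise work is then done by
// tensor_unary_array_operation over the whole sample.  A minimal stand-alone analogue of
// that final step, with invented names (applyUnary, UnaryFn):
#include <cstddef>
#include <cmath>

typedef double (*UnaryFn)(double);

// apply fn to n contiguous values starting at in, writing the results to out
inline void applyUnary(std::size_t n, const double* in, double* out, UnaryFn fn)
{
    for (std::size_t i=0; i<n; ++i)
    {
        out[i]=fn(in[i]);
    }
}

// e.g. applyUnary(samplesize, left, result, static_cast<double(*)(double)>(::sin));
// mirrors the SIN case, much as the old revision called tensor_unary_operation directly.
// ---- end sketch ---------------------------------------------------------------------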
1165    
1166    
1167    const DataTypes::RealVectorType*
1168    DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset) const
1169    {
1170            // we assume that any collapsing has been done before we get here
1171            // since we only have one argument we don't need to think about only
1172            // processing single points.
1173            // we will also know we won't get identity nodes
1174      if (m_readytype!='E')
1175      {
1176      throw DataException("Programmer error - resolveNodeReduction should only be called on expanded Data.");

1177      }
1178      if (m_op==IDENTITY)
1179      {
1180      throw DataException("Programmer error - resolveNodeReduction should not be called on identity nodes.");
1181      }
1182      size_t loffset=0;
1183      const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, loffset);
1184    
1185      roffset=m_samplesize*tid;
1186      unsigned int ndpps=getNumDPPSample();
1187      unsigned int psize=DataTypes::noValues(m_left->getShape());
1188      double* result=&(m_samples[roffset]);
1189      switch (m_op)
1190      {
1191        case MINVAL:
1192            {
1193              for (unsigned int z=0;z<ndpps;++z)
1194              {
1195                FMin op;
1196                *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max());
1197                loffset+=psize;
1198                result++;
1199              }
1200            }
1201            break;
1202        case MAXVAL:
1203            {
1204              for (unsigned int z=0;z<ndpps;++z)
1205              {
1206              FMax op;
1207              *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max()*-1);
1208              loffset+=psize;
1209              result++;
1210              }
1211            }
1212            break;
1213        default:
1214          throw DataException("Programmer error - resolveNodeReduction cannot resolve operator "+opToString(m_op)+".");
1215      }
1216      return &(m_samples);
1217    }
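// ---- Editorial sketch (not part of either revision) --------------------------------
// MINVAL/MAXVAL above reduce each datapoint (psize values) to a single number and then
// advance to the next datapoint.  A stand-alone analogue of the MINVAL case, with
// invented names (minvalPerPoint):
#include <cstddef>
#include <limits>

// reduce ndpps datapoints of psize values each; out receives one value per datapoint
inline void minvalPerPoint(std::size_t ndpps, std::size_t psize, const double* in, double* out)
{
    for (std::size_t z=0; z<ndpps; ++z)
    {
        double m=std::numeric_limits<double>::max();
        for (std::size_t i=0; i<psize; ++i)
        {
            if (in[z*psize+i]<m) m=in[z*psize+i];
        }
        out[z]=m;
    }
}
// ---- end sketch ---------------------------------------------------------------------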
1218    
1219    const DataTypes::RealVectorType*
1220    DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) const
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
1221  {  {
1222      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1223      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1224      // processing single points.          // processing single points.
1225    if (m_readytype!='E')    if (m_readytype!='E')
1226    {    {
1227      throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");
1228    }    }
1229      // since we can't write the result over the input, we need a result offset further along    if (m_op==IDENTITY)
1230    size_t subroffset=roffset+m_samplesize;    {
1231    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");
1232    roffset=offset;    }
1233      size_t subroffset;
1234      const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1235      roffset=m_samplesize*tid;
1236      size_t loop=0;
1237      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1238      size_t step=getNoValues();
1239      size_t offset=roffset;
1240    switch (m_op)    switch (m_op)
1241    {    {
1242      case SYM:      case SYM:
1243      DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);          for (loop=0;loop<numsteps;++loop)
1244      break;          {
1245                escript::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1246                subroffset+=step;
1247                offset+=step;
1248            }
1249            break;
1250      case NSYM:      case NSYM:
1251      DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);          for (loop=0;loop<numsteps;++loop)
1252      break;          {
1253                escript::antisymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1254                subroffset+=step;
1255                offset+=step;
1256            }
1257            break;
1258      default:      default:
1259      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
1260    }    }
1261    return &v;    return &m_samples;
1262  }  }
1263    
1264  /*  const DataTypes::RealVectorType*
1265    \brief Compute the value of the expression (unary operation) for the given sample.  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset) const
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
1266  {  {
1267      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1268      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1269      // processing single points.          // processing single points.
1270    if (m_readytype!='E')    if (m_readytype!='E')
1271    {    {
1272      throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");
1273    }    }
1274      // since we can't write the result over the input, we need a result offset further along    if (m_op==IDENTITY)
1275    size_t subroffset=roffset+m_samplesize;    {
1276    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);      throw DataException("Programmer error - resolveNodeNP1OUT_P should not be called on identity nodes.");
1277    roffset=offset;    }
1278      size_t subroffset;
1279      size_t offset;
1280      const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1281      roffset=m_samplesize*tid;
1282      offset=roffset;
1283      size_t loop=0;
1284      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1285      size_t outstep=getNoValues();
1286      size_t instep=m_left->getNoValues();
1287    switch (m_op)    switch (m_op)
1288    {    {
1289      case TRACE:      case TRACE:
1290           DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);          for (loop=0;loop<numsteps;++loop)
1291      break;          {
1292                escript::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);
1293                subroffset+=instep;
1294                offset+=outstep;
1295            }
1296            break;
1297      case TRANS:      case TRANS:
1298           DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);          for (loop=0;loop<numsteps;++loop)
1299      break;          {
1300                escript::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);
1301                subroffset+=instep;
1302                offset+=outstep;
1303            }
1304            break;
1305      default:      default:
1306      throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
1307    }    }
1308    return &v;    return &m_samples;
1309  }  }
1310    
1311    
1312  #define PROC_OP(TYPE,X)                               \  const DataTypes::RealVectorType*
1313      for (int i=0;i<steps;++i,resultp+=resultStep) \  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset) const
1314      { \  {
1315         tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \    if (m_readytype!='E')
1316         lroffset+=leftStep; \    {
1317         rroffset+=rightStep; \      throw DataException("Programmer error - resolveNodeNP1OUT_2P should only be called on expanded Data.");
1318      }    }
1319      if (m_op==IDENTITY)
1320      {
1321        throw DataException("Programmer error - resolveNodeNP1OUT_2P should not be called on identity nodes.");
1322      }
1323      size_t subroffset;
1324      size_t offset;
1325      const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1326      roffset=m_samplesize*tid;
1327      offset=roffset;
1328      size_t loop=0;
1329      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1330      size_t outstep=getNoValues();
1331      size_t instep=m_left->getNoValues();
1332      switch (m_op)
1333      {
1334        case SWAP:
1335            for (loop=0;loop<numsteps;++loop)
1336            {
1337                escript::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);
1338                subroffset+=instep;
1339                offset+=outstep;
1340            }
1341            break;
1342        default:
1343            throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");
1344      }
1345      return &m_samples;
1346    }
1347    
1348    const DataTypes::RealVectorType*
1349    DataLazy::resolveNodeCondEval(int tid, int sampleNo, size_t& roffset) const
1350    {
1351      if (m_readytype!='E')
1352      {
1353        throw DataException("Programmer error - resolveNodeCondEval should only be called on expanded Data.");
1354      }
1355      if (m_op!=CONDEVAL)
1356      {
1357        throw DataException("Programmer error - resolveNodeCondEval should only be called on CONDEVAL nodes.");
1358      }
1359      size_t subroffset;
1360    
1361      const RealVectorType* maskres=m_mask->resolveNodeSample(tid, sampleNo, subroffset);
1362      const RealVectorType* srcres=0;
1363      if ((*maskres)[subroffset]>0)
1364      {
1365            srcres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1366      }
1367      else
1368      {
1369            srcres=m_right->resolveNodeSample(tid, sampleNo, subroffset);
1370      }
1371    
1372      // Now we need to copy the result
1373    
1374      roffset=m_samplesize*tid;
1375      for (int i=0;i<m_samplesize;++i)
1376      {
1377            m_samples[roffset+i]=(*srcres)[subroffset+i];  
1378      }
1379    
1380      return &m_samples;
1381    }
1382    
 /*  
   \brief Compute the value of the expression (binary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
1383  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1384  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1385  // If both children are expanded, then we can process them in a single operation (we treat  // If both children are expanded, then we can process them in a single operation (we treat
# Line 998  DataLazy::resolveNP1OUT_P(ValueType& v, Line 1389  DataLazy::resolveNP1OUT_P(ValueType& v,
1389  // There is an additional complication when scalar operations are considered.  // There is an additional complication when scalar operations are considered.
1390  // For example, 2+Vector.  // For example, 2+Vector.
1391  // In this case each double within the point is treated individually  // In this case each double within the point is treated individually
1392  DataTypes::ValueType*  const DataTypes::RealVectorType*
1393  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset) const
1394  {  {
1395  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1396    
1397    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1398      // first work out which of the children are expanded          // first work out which of the children are expanded
1399    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1400    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1401    if (!leftExp && !rightExp)    if (!leftExp && !rightExp)
1402    {    {
1403      throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");          throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
1404    }    }
1405    bool leftScalar=(m_left->getRank()==0);    bool leftScalar=(m_left->getRank()==0);
1406    bool rightScalar=(m_right->getRank()==0);    bool rightScalar=(m_right->getRank()==0);
1407    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1408    int steps=(bigloops?1:getNumDPPSample());    {
1409    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint          throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1410    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops    }
1411    {    size_t leftsize=m_left->getNoValues();
1412      if (!leftScalar && !rightScalar)    size_t rightsize=m_right->getNoValues();
1413      {    size_t chunksize=1;                   // how many doubles will be processed in one go
1414         throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");    int leftstep=0;               // how far should the left offset advance after each step
1415      }    int rightstep=0;
1416      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());    int numsteps=0;               // total number of steps for the inner loop
1417      chunksize=1;    // for scalar    int oleftstep=0;      // the o variables refer to the outer loop
1418    }        int orightstep=0;     // The outer loop is only required in cases where there is an extended scalar
1419    int leftStep=((leftExp && (!rightExp || rightScalar))? m_right->getNoValues() : 0);    int onumsteps=1;
1420    int rightStep=((rightExp && (!leftExp || leftScalar))? m_left->getNoValues() : 0);    
1421    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    bool LES=(leftExp && leftScalar);     // Left is an expanded scalar
1422      // Get the values of sub-expressions    bool RES=(rightExp && rightScalar);
1423    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    bool LS=(!leftExp && leftScalar);     // left is a single scalar
1424    const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note    bool RS=(!rightExp && rightScalar);
1425      // the right child starts further along.    bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1426    double* resultp=&(v[offset]);     // results are stored at the vector offset we received    bool RN=(!rightExp && !rightScalar);
1427      bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1428      bool REN=(rightExp && !rightScalar);
1429    
1430      if ((LES && RES) || (LEN && REN))     // both are Expanded scalars or both are expanded non-scalars
1431      {
1432            chunksize=m_left->getNumDPPSample()*leftsize;
1433            leftstep=0;
1434            rightstep=0;
1435            numsteps=1;
1436      }
1437      else if (LES || RES)
1438      {
1439            chunksize=1;
1440            if (LES)                // left is an expanded scalar
1441            {
1442                    if (RS)
1443                    {
1444                       leftstep=1;
1445                       rightstep=0;
1446                       numsteps=m_left->getNumDPPSample();
1447                    }
1448                    else            // RN or REN
1449                    {
1450                       leftstep=0;
1451                       oleftstep=1;
1452                       rightstep=1;
1453                       orightstep=(RN ? -(int)rightsize : 0);
1454                       numsteps=rightsize;
1455                       onumsteps=m_left->getNumDPPSample();
1456                    }
1457            }
1458            else            // right is an expanded scalar
1459            {
1460                    if (LS)
1461                    {
1462                       rightstep=1;
1463                       leftstep=0;
1464                       numsteps=m_right->getNumDPPSample();
1465                    }
1466                    else
1467                    {
1468                       rightstep=0;
1469                       orightstep=1;
1470                       leftstep=1;
1471                       oleftstep=(LN ? -(int)leftsize : 0);
1472                       numsteps=leftsize;
1473                       onumsteps=m_right->getNumDPPSample();
1474                    }
1475            }
1476      }
1477      else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1478      {
1479            if (LEN)        // and Right will be a single value
1480            {
1481                    chunksize=rightsize;
1482                    leftstep=rightsize;
1483                    rightstep=0;
1484                    numsteps=m_left->getNumDPPSample();
1485                    if (RS)
1486                    {
1487                       numsteps*=leftsize;
1488                    }
1489            }
1490            else    // REN
1491            {
1492                    chunksize=leftsize;
1493                    rightstep=leftsize;
1494                    leftstep=0;
1495                    numsteps=m_right->getNumDPPSample();
1496                    if (LS)
1497                    {
1498                       numsteps*=rightsize;
1499                    }
1500            }
1501      }
1502    
1503      int resultStep=max(leftstep,rightstep);       // only one (at most) should be !=0
1504            // Get the values of sub-expressions
1505      const RealVectorType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);      
1506      const RealVectorType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);
1507    LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1508    LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1509    LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
1510    LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)
1511    LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)
1512    LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)
1513    LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)
1514    
1515    LAZYDEBUG(cout << "Left res["<< lroffset<< "]=" << (*left)[lroffset] << endl;)
1516    LAZYDEBUG(cout << "Right res["<< rroffset<< "]=" << (*right)[rroffset] << endl;)
1517    
1518    
1519      roffset=m_samplesize*tid;
1520      double* resultp=&(m_samples[roffset]);                // results are stored at the vector offset we received
1521    switch(m_op)    switch(m_op)
1522    {    {
1523      case ADD:      case ADD:
1524          PROC_OP(NO_ARG,plus<double>());          //PROC_OP(NO_ARG,plus<double>());
1525      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1526                 &(*left)[0],
1527                 &(*right)[0],
1528                 chunksize,
1529                 onumsteps,
1530                 numsteps,
1531                 resultStep,
1532                 leftstep,
1533                 rightstep,
1534                 oleftstep,
1535                 orightstep,
1536                 lroffset,
1537                 rroffset,
1538                 escript::ESFunction::PLUSF);  
1539            break;
1540      case SUB:      case SUB:
1541      PROC_OP(NO_ARG,minus<double>());        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1542      break;               &(*left)[0],
1543                 &(*right)[0],
1544                 chunksize,
1545                 onumsteps,
1546                 numsteps,
1547                 resultStep,
1548                 leftstep,
1549                 rightstep,
1550                 oleftstep,
1551                 orightstep,
1552                 lroffset,
1553                 rroffset,
1554                 escript::ESFunction::MINUSF);        
1555            //PROC_OP(NO_ARG,minus<double>());
1556            break;
1557      case MUL:      case MUL:
1558      PROC_OP(NO_ARG,multiplies<double>());          //PROC_OP(NO_ARG,multiplies<double>());
1559      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1560                 &(*left)[0],
1561                 &(*right)[0],
1562                 chunksize,
1563                 onumsteps,
1564                 numsteps,
1565                 resultStep,
1566                 leftstep,
1567                 rightstep,
1568                 oleftstep,
1569                 orightstep,
1570                 lroffset,
1571                 rroffset,
1572                 escript::ESFunction::MULTIPLIESF);      
1573            break;
1574      case DIV:      case DIV:
1575      PROC_OP(NO_ARG,divides<double>());          //PROC_OP(NO_ARG,divides<double>());
1576      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1577                 &(*left)[0],
1578                 &(*right)[0],
1579                 chunksize,
1580                 onumsteps,
1581                 numsteps,
1582                 resultStep,
1583                 leftstep,
1584                 rightstep,
1585                 oleftstep,
1586                 orightstep,
1587                 lroffset,
1588                 rroffset,
1589                 escript::ESFunction::DIVIDESF);          
1590            break;
1591      case POW:      case POW:
1592         PROC_OP(double (double,double),::pow);         //PROC_OP(double (double,double),::pow);
1593      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1594                 &(*left)[0],
1595                 &(*right)[0],
1596                 chunksize,
1597                 onumsteps,
1598                 numsteps,
1599                 resultStep,
1600                 leftstep,
1601                 rightstep,
1602                 oleftstep,
1603                 orightstep,
1604                 lroffset,
1605                 rroffset,
1606                 escript::ESFunction::POWF);          
1607            break;
1608      default:      default:
1609      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
1610    }    }
1611    roffset=offset;    LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)
1612    return &v;    return &m_samples;
1613  }  }
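// ---- Editorial sketch (not part of either revision) --------------------------------
// The chunk/step bookkeeping above feeds a doubly-nested loop of roughly the following
// shape; this is only an illustration of what binaryOpVectorLazyHelper is being asked to
// do for ADD, with invented names, not the library implementation itself:
#include <cstddef>

inline void steppedBinaryAdd(double* res, const double* left, const double* right,
                             std::size_t chunksize,
                             int onumsteps, int numsteps, int resultstep,
                             int leftstep, int rightstep, int oleftstep, int orightstep,
                             std::ptrdiff_t loff, std::ptrdiff_t roff)
{
    for (int o=0; o<onumsteps; ++o)            // outer loop: only needed when one side is an expanded scalar
    {
        for (int s=0; s<numsteps; ++s)         // inner loop over datapoints (or values)
        {
            for (std::size_t i=0; i<chunksize; ++i)
            {
                res[i]=left[loff+i]+right[roff+i];
            }
            res+=resultstep;
            loff+=leftstep;
            roff+=rightstep;
        }
        loff+=oleftstep;                       // may be negative: rewinds the non-scalar operand
        roff+=orightstep;
    }
}
// ---- end sketch ---------------------------------------------------------------------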
1614    
1615    
 /*  
   \brief Compute the value of the expression (tensor product) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
1616  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1617  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1618  // unlike the other resolve helpers, we must treat these datapoints separately.  // unlike the other resolve helpers, we must treat these datapoints separately.
1619  DataTypes::ValueType*  const DataTypes::RealVectorType*
1620  DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset) const
1621  {  {
1622  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1623    
1624    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1625      // first work out which of the children are expanded          // first work out which of the children are expanded
1626    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1627    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1628    int steps=getNumDPPSample();    int steps=getNumDPPSample();
1629    int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);    int leftStep=(leftExp? m_left->getNoValues() : 0);            // do not have scalars as input to this method
1630    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);    int rightStep=(rightExp?m_right->getNoValues() : 0);
1631    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0  
1632      // Get the values of sub-expressions (leave a gap of one sample for the result).    int resultStep=getNoValues();
1633    const ValueType* left=m_left->resolveSample(v,offset+m_samplesize,sampleNo,lroffset);    roffset=m_samplesize*tid;
1634    const ValueType* right=m_right->resolveSample(v,offset+2*m_samplesize,sampleNo,rroffset);    size_t offset=roffset;
1635    double* resultp=&(v[offset]);     // results are stored at the vector offset we received  
1636      const RealVectorType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);
1637    
1638      const RealVectorType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);
1639    
1640    LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;
1641    cout << getNoValues() << endl;)
1642    
1643    
1644    LAZYDEBUG(cerr << "Post sub calls: " << toString() << endl;)
1645    LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)
1646    LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)
1647    LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)
1648    LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)
1649    LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1650    LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)
1651    
1652      double* resultp=&(m_samples[offset]);         // results are stored at the vector offset we received
1653    switch(m_op)    switch(m_op)
1654    {    {
1655      case PROD:      case PROD:
1656      for (int i=0;i<steps;++i,resultp+=resultStep)          for (int i=0;i<steps;++i,resultp+=resultStep)
1657      {          {
1658            const double *ptr_0 = &((*left)[lroffset]);            const double *ptr_0 = &((*left)[lroffset]);
1659            const double *ptr_1 = &((*right)[rroffset]);            const double *ptr_1 = &((*right)[rroffset]);
1660            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);  
1661        lroffset+=leftStep;  LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
1662        rroffset+=rightStep;  LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)
1663      }  
1664      break;            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1665    
1666              lroffset+=leftStep;
1667              rroffset+=rightStep;
1668            }
1669            break;
1670      default:      default:
1671      throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1672    }    }
1673    roffset=offset;    roffset=offset;
1674    return &v;    return &m_samples;
1675  }  }
1676    
1677    
1678    const DataTypes::RealVectorType*
1679    DataLazy::resolveSample(int sampleNo, size_t& roffset) const
1680    {
1681    #ifdef _OPENMP
1682            int tid=omp_get_thread_num();
1683    #else
1684            int tid=0;
1685    #endif
1686    
1687  /*  #ifdef LAZY_STACK_PROF
1688    \brief Compute the value of the expression for the given sample.          stackstart[tid]=&tid;
1689    \return Vector which stores the value of the subexpression for the given sample.          stackend[tid]=&tid;
1690    \param v A vector to store intermediate results.          const DataTypes::RealVectorType* r=resolveNodeSample(tid, sampleNo, roffset);
1691    \param offset Index in v to begin storing results.          size_t d=(size_t)stackstart[tid]-(size_t)stackend[tid];
1692    \param sampleNo Sample number to evaluate.          #pragma omp critical
1693    \param roffset (output parameter) the offset in the return vector where the result begins.          if (d>maxstackuse)
1694            {
1695    cout << "Max resolve Stack use " << d << endl;
1696                    maxstackuse=d;
1697            }
1698            return r;
1699    #else
1700            return resolveNodeSample(tid, sampleNo, roffset);
1701    #endif
1702    }
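// ---- Editorial sketch (not part of either revision) --------------------------------
// LAZY_STACK_PROF estimates recursion cost by comparing addresses of stack locals: the
// entry point records the address of a local, each recursive call lowers a per-thread
// "deepest address seen", and the difference approximates bytes of stack used.  A
// stand-alone analogue (invented names; assumes a downward-growing stack, and is only an
// approximation since the compiler may reuse or elide frames):
#include <cstddef>
#include <iostream>

static const char* stack_top = 0;      // address recorded at the entry point
static const char* stack_low = 0;      // deepest (lowest) address seen so far

void recurse(int depth)
{
    char marker;                        // any local will do; its address tracks the stack
    if (&marker < stack_low) stack_low = &marker;
    if (depth > 0) recurse(depth-1);
}

int main()
{
    char here;
    stack_top = stack_low = &here;
    recurse(100);
    std::cout << "approx stack use: " << (stack_top - stack_low) << " bytes\n";
    return 0;
}
// ---- end sketch ---------------------------------------------------------------------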
1703    
1704    The return value will be an existing vector so do not deallocate it.  
1705  */  // This needs to do the work of the identity constructor
1706  // the vector and the offset are a place where the method could write its data if it wishes  void
1707  // it is not obligated to do so. For example, if it has its own storage already, it can use that.  DataLazy::resolveToIdentity()
 // Hence the return value to indicate where the data is actually stored.  
 // Regardless, the storage should be assumed to be used, even if it isn't.  
   
 // the roffset is the offset within the returned vector where the data begins  
 const DataTypes::ValueType*  
 DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  
1708  {  {
1709  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)     if (m_op==IDENTITY)
1710      // collapse so we have a 'E' node or an IDENTITY for some other type          return;
1711    if (m_readytype!='E' && m_op!=IDENTITY)     DataReady_ptr p=resolveNodeWorker();
1712    {     makeIdentity(p);
1713      collapse();  }
1714    }  
1715    if (m_op==IDENTITY)    void DataLazy::makeIdentity(const DataReady_ptr& p)
1716    {
1717       m_op=IDENTITY;
1718       m_axis_offset=0;
1719       m_transpose=0;
1720       m_SL=m_SM=m_SR=0;
1721       m_children=m_height=0;
1722       m_id=p;
1723       if(p->isConstant()) {m_readytype='C';}
1724       else if(p->isExpanded()) {m_readytype='E';}
1725       else if (p->isTagged()) {m_readytype='T';}
1726       else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}
1727       m_samplesize=p->getNumDPPSample()*p->getNoValues();
1728       m_left.reset();
1729       m_right.reset();
1730    }
1731    
1732    
1733    DataReady_ptr
1734    DataLazy::resolve()
1735    {
1736        resolveToIdentity();
1737        return m_id;
1738    }
1739    
1740    
1741    /* This is really a static method but I think that caused problems in windows */
1742    void
1743    DataLazy::resolveGroupWorker(std::vector<DataLazy*>& dats)
1744    {
1745      if (dats.empty())
1746    {    {
1747      const ValueType& vec=m_id->getVector();          return;
     if (m_readytype=='C')  
     {  
     roffset=0;  
     return &(vec);  
     }  
     roffset=m_id->getPointOffset(sampleNo, 0);  
     return &(vec);  
1748    }    }
1749    if (m_readytype!='E')    vector<DataLazy*> work;
1750    {    FunctionSpace fs=dats[0]->getFunctionSpace();
1751      throw DataException("Programmer Error - Collapse did not produce an expanded node.");    bool match=true;
1752      for (int i=dats.size()-1;i>=0;--i)
1753      {
1754            if (dats[i]->m_readytype!='E')
1755            {
1756                    dats[i]->collapse();
1757            }
1758            if (dats[i]->m_op!=IDENTITY)
1759            {
1760                    work.push_back(dats[i]);
1761                    if (fs!=dats[i]->getFunctionSpace())
1762                    {
1763                            match=false;
1764                    }
1765            }
1766      }
1767      if (work.empty())
1768      {
1769            return;         // no work to do
1770      }
1771      if (match)    // all functionspaces match.  Yes I realise this is overly strict
1772      {             // it is possible that dats[0] is one of the objects which we discarded and
1773                    // all the other functionspaces match.
1774            vector<DataExpanded*> dep;
1775            vector<RealVectorType*> vecs;
1776            for (int i=0;i<work.size();++i)
1777            {
1778                    dep.push_back(new DataExpanded(fs,work[i]->getShape(), RealVectorType(work[i]->getNoValues())));
1779                    vecs.push_back(&(dep[i]->getVectorRW()));
1780            }
1781            int totalsamples=work[0]->getNumSamples();
1782            const RealVectorType* res=0; // Storage for answer
1783            int sample;
1784            #pragma omp parallel private(sample, res)
1785            {
1786                size_t roffset=0;
1787                #pragma omp for schedule(static)
1788                for (sample=0;sample<totalsamples;++sample)
1789                {
1790                    roffset=0;
1791                    int j;
1792                    for (j=work.size()-1;j>=0;--j)
1793                    {
1794    #ifdef _OPENMP
1795                        res=work[j]->resolveNodeSample(omp_get_thread_num(),sample,roffset);
1796    #else
1797                        res=work[j]->resolveNodeSample(0,sample,roffset);
1798    #endif
1799                        RealVectorType::size_type outoffset=dep[j]->getPointOffset(sample,0);
1800                        memcpy(&((*vecs[j])[outoffset]),&((*res)[roffset]),work[j]->m_samplesize*sizeof(RealVectorType::ElementType));
1801                    }
1802                }
1803            }
1804            // Now we need to load the new results as identity ops into the lazy nodes
1805            for (int i=work.size()-1;i>=0;--i)
1806            {
1807                work[i]->makeIdentity(REFCOUNTNS::dynamic_pointer_cast<DataReady>(dep[i]->getPtr()));
1808            }
1809    }    }
1810    switch (getOpgroup(m_op))    else  // functionspaces do not match
1811    {    {
1812    case G_UNARY:          for (int i=0;i<work.size();++i)
1813    case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);          {
1814    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);                  work[i]->resolveToIdentity();
1815    case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);          }
   case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);  
   case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);  
   default:  
     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");  
1816    }    }
1817  }  }
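// ---- Editorial note (not part of either revision) -----------------------------------
// A caller holding several lazy expressions over the same FunctionSpace could batch them
// so their samples are resolved in one parallel sweep, e.g. (hypothetical caller code):
//
//   std::vector<DataLazy*> nodes;
//   nodes.push_back(lazyA);            // DataLazy* pointers the caller already owns
//   nodes.push_back(lazyB);
//   nodes.front()->resolveGroupWorker(nodes);   // each worked node becomes an IDENTITY afterwards
//
// (resolveGroupWorker behaves like a static method; see the comment above its definition.)
// ---- end note -------------------------------------------------------------------------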
1818    
1819    
1820  // To simplify the memory management, all threads operate on one large vector, rather than one each.  
1821  // Each sample is evaluated independently and copied into the result DataExpanded.  // This version of resolve uses storage in each node to hold results
1822  DataReady_ptr  DataReady_ptr
1823  DataLazy::resolve()  DataLazy::resolveNodeWorker()
1824  {  {
1825      if (m_readytype!='E')         // if the whole sub-expression is Constant or Tagged, then evaluate it normally
 LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)  
   
   if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally  
1826    {    {
1827      collapse();      collapse();
1828    }    }
1829    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.    if (m_op==IDENTITY)           // So a lazy expression of Constant or Tagged data will be returned here.
1830    {    {
1831      return m_id;      return m_id;
1832    }    }
1833      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'          // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1834    size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  RealVectorType(getNoValues()));
1835      // storage to evaluate its expression    RealVectorType& resvec=result->getVectorRW();
   int numthreads=1;  
 #ifdef _OPENMP  
   numthreads=getNumberOfThreads();  
 #endif  
   ValueType v(numthreads*threadbuffersize);  
 LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)  
   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));  
   ValueType& resvec=result->getVector();  
1836    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
1837    
1838    int sample;    int sample;
   size_t outoffset;     // offset in the output data  
1839    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1840    const ValueType* res=0;   // Vector storing the answer    const RealVectorType* res=0;       // Storage for answer
1841    size_t resoffset=0;       // where in the vector to find the answer  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1842    #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)    #pragma omp parallel private(sample,res)
1843    for (sample=0;sample<totalsamples;++sample)    {
1844    {          size_t roffset=0;
1845  LAZYDEBUG(cout << "################################# " << sample << endl;)  #ifdef LAZY_STACK_PROF
1846            stackstart[omp_get_thread_num()]=&roffset;
1847            stackend[omp_get_thread_num()]=&roffset;
1848    #endif
1849            #pragma omp for schedule(static)
1850            for (sample=0;sample<totalsamples;++sample)
1851            {
1852                    roffset=0;
1853  #ifdef _OPENMP  #ifdef _OPENMP
1854      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);                  res=resolveNodeSample(omp_get_thread_num(),sample,roffset);
1855  #else  #else
1856      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.                  res=resolveNodeSample(0,sample,roffset);
1857  #endif  #endif
1858  LAZYDEBUG(cerr << "-------------------------------- " << endl;)  LAZYDEBUG(cout << "Sample #" << sample << endl;)
1859      outoffset=result->getPointOffset(sample,0);  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )
1860  LAZYDEBUG(cerr << "offset=" << outoffset << endl;)                  RealVectorType::size_type outoffset=result->getPointOffset(sample,0);
1861      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector                  memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(RealVectorType::ElementType));
1862      {          }
1863      resvec[outoffset]=(*res)[resoffset];    }
1864      }  #ifdef LAZY_STACK_PROF
1865  LAZYDEBUG(cerr << "*********************************" << endl;)    for (int i=0;i<getNumberOfThreads();++i)
1866      {
1867            size_t r=((size_t)stackstart[i] - (size_t)stackend[i]);
1868    //      cout << i << " " << stackstart[i] << " .. " << stackend[i] << " = " <<  r << endl;
1869            if (r>maxstackuse)
1870            {
1871                    maxstackuse=r;
1872            }
1873    }    }
1874      cout << "Max resolve Stack use=" << maxstackuse << endl;
1875    #endif
1876    return resptr;    return resptr;
1877  }  }
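// ---- Editorial sketch (not part of either revision) --------------------------------
// The worker above is an instance of a common pattern: a parallel loop over samples in
// which each thread resolves into its own slice of scratch storage and then memcpy()s
// the finished sample into the shared output at that sample's offset.  Stand-alone
// analogue with invented names (resolveOne stands in for resolveNodeSample, and the
// output is assumed to be laid out contiguously, sample by sample):
#include <cstring>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif

void resolveAll(std::vector<double>& out, int totalsamples, std::size_t samplesize,
                const double* (*resolveOne)(int tid, int sampleNo, std::size_t& roffset))
{
    #pragma omp parallel
    {
        std::size_t roffset=0;                      // private per thread
        #pragma omp for schedule(static)
        for (int sample=0; sample<totalsamples; ++sample)
        {
#ifdef _OPENMP
            const double* res=resolveOne(omp_get_thread_num(), sample, roffset);
#else
            const double* res=resolveOne(0, sample, roffset);
#endif
            std::memcpy(&out[sample*samplesize], res+roffset, samplesize*sizeof(double));
        }
    }
}
// ---- end sketch ---------------------------------------------------------------------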
1878    
# Line 1224  std::string Line 1880  std::string
1880  DataLazy::toString() const  DataLazy::toString() const
1881  {  {
1882    ostringstream oss;    ostringstream oss;
1883    oss << "Lazy Data:";    oss << "Lazy Data: [depth=" << m_height<< "] ";
1884    intoString(oss);    switch (escriptParams.getLAZY_STR_FMT())
1885      {
1886      case 1:       // tree format
1887            oss << endl;
1888            intoTreeString(oss,"");
1889            break;
1890      case 2:       // just the depth
1891            break;
1892      default:
1893            intoString(oss);
1894            break;
1895      }
1896    return oss.str();    return oss.str();
1897  }  }
1898    
# Line 1233  DataLazy::toString() const Line 1900  DataLazy::toString() const
1900  void  void
1901  DataLazy::intoString(ostringstream& oss) const  DataLazy::intoString(ostringstream& oss) const
1902  {  {
1903    //    oss << "[" << m_children <<";"<<m_height <<"]";
1904    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1905    {    {
1906    case G_IDENTITY:    case G_IDENTITY:
1907      if (m_id->isExpanded())          if (m_id->isExpanded())
1908      {          {
1909         oss << "E";             oss << "E";
1910      }          }
1911      else if (m_id->isTagged())          else if (m_id->isTagged())
1912      {          {
1913        oss << "T";            oss << "T";
1914      }          }
1915      else if (m_id->isConstant())          else if (m_id->isConstant())
1916      {          {
1917        oss << "C";            oss << "C";
1918      }          }
1919      else          else
1920      {          {
1921        oss << "?";            oss << "?";
1922      }          }
1923      oss << '@' << m_id.get();          oss << '@' << m_id.get();
1924      break;          break;
1925    case G_BINARY:    case G_BINARY:
1926      oss << '(';          oss << '(';
1927      m_left->intoString(oss);          m_left->intoString(oss);
1928      oss << ' ' << opToString(m_op) << ' ';          oss << ' ' << opToString(m_op) << ' ';
1929      m_right->intoString(oss);          m_right->intoString(oss);
1930      oss << ')';          oss << ')';
1931      break;          break;
1932    case G_UNARY:    case G_UNARY:
1933    case G_UNARY_P:    case G_UNARY_P:
1934    case G_NP1OUT:    case G_NP1OUT:
1935    case G_NP1OUT_P:    case G_NP1OUT_P:
1936      oss << opToString(m_op) << '(';    case G_REDUCTION:
1937      m_left->intoString(oss);          oss << opToString(m_op) << '(';
1938      oss << ')';          m_left->intoString(oss);
1939      break;          oss << ')';
1940            break;
1941    case G_TENSORPROD:    case G_TENSORPROD:
1942      oss << opToString(m_op) << '(';          oss << opToString(m_op) << '(';
1943      m_left->intoString(oss);          m_left->intoString(oss);
1944      oss << ", ";          oss << ", ";
1945      m_right->intoString(oss);          m_right->intoString(oss);
1946      oss << ')';          oss << ')';
1947      break;          break;
1948      case G_NP1OUT_2P:
1949            oss << opToString(m_op) << '(';
1950            m_left->intoString(oss);
1951            oss << ", " << m_axis_offset << ", " << m_transpose;
1952            oss << ')';
1953            break;
1954      case G_CONDEVAL:
1955            oss << opToString(m_op)<< '(' ;
1956            m_mask->intoString(oss);
1957            oss << " ? ";
1958            m_left->intoString(oss);
1959            oss << " : ";
1960            m_right->intoString(oss);
1961            oss << ')';
1962            break;
1963    default:    default:
1964      oss << "UNKNOWN";          oss << "UNKNOWN";
1965    }    }
1966  }  }
1967    
1968    
void
DataLazy::intoTreeString(ostringstream& oss, string indent) const
{
  oss << '[' << m_rank << ':' << setw(3) << m_samplesize << "] " << indent;
  switch (getOpgroup(m_op))
  {
  case G_IDENTITY:
        if (m_id->isExpanded())
        {
            oss << "E";
        }
        else if (m_id->isTagged())
        {
            oss << "T";
        }
        else if (m_id->isConstant())
        {
            oss << "C";
        }
        else
        {
            oss << "?";
        }
        oss << '@' << m_id.get() << endl;
        break;
  case G_BINARY:
        oss << opToString(m_op) << endl;
        indent+='.';
        m_left->intoTreeString(oss, indent);
        m_right->intoTreeString(oss, indent);
        break;
  case G_UNARY:
  case G_UNARY_P:
  case G_NP1OUT:
  case G_NP1OUT_P:
  case G_REDUCTION:
        oss << opToString(m_op) << endl;
        indent+='.';
        m_left->intoTreeString(oss, indent);
        break;
  case G_TENSORPROD:
        oss << opToString(m_op) << endl;
        indent+='.';
        m_left->intoTreeString(oss, indent);
        m_right->intoTreeString(oss, indent);
        break;
  case G_NP1OUT_2P:
        oss << opToString(m_op) << ", " << m_axis_offset << ", " << m_transpose << endl;
        indent+='.';
        m_left->intoTreeString(oss, indent);
        break;
  default:
        oss << "UNKNOWN";
  }
}

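// Illustrative sketch only: for the same hypothetical sin(A)+B expression, and
// assuming the top-level call passes an empty indent, intoTreeString() prints
// one node per line, prefixed with [rank:samplesize] and indented with one '.'
// per tree level, e.g.
//
//   [0:  1] +
//   [0:  1] .sin
//   [0:  1] ..E@0x7f3a10
//   [0:  1] .C@0x7f3a88
//
// (rank, samplesize and addresses here are made up for the example).
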
DataAbstract*
DataLazy::deepCopy() const
{
  switch (getOpgroup(m_op))
  {
  case G_IDENTITY:   return new DataLazy(m_id->deepCopy()->getPtr());
  case G_UNARY:
  case G_REDUCTION:  return new DataLazy(m_left->deepCopy()->getPtr(), m_op);
  case G_UNARY_P:    return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_tol);
  case G_BINARY:     return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op);
  case G_NP1OUT:     return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op);
  case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
  case G_NP1OUT_P:   return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset);
  case G_NP1OUT_2P:  return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
  default:
        throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
  }
}

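// Usage sketch (variable names are hypothetical, not from the source):
// deepCopy() clones the whole expression tree, so the copy shares no nodes
// with the original. Each case above copies its children first and then
// rebuilds a node of the same operation around the copies, mirroring the
// constructors used when the expression was first assembled.
//
//   DataAbstract_ptr original = ...;                       // some lazy expression
//   DataAbstract_ptr clone = original->deepCopy()->getPtr();
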
// There is no single, natural interpretation of getLength on DataLazy.
// Instances of DataReady can look at the size of their vectors.
// For lazy though, it could be the size the data would be if it were resolved;
// or it could be some function of the lengths of the DataReady instances which
// form part of the expression.
// Rather than have people make assumptions, I have disabled the method.
DataTypes::RealVectorType::size_type
DataLazy::getLength() const
{
  throw DataException("getLength() does not make sense for lazy data.");
}

// [diff view: unchanged lines omitted here (old line 1318 / new line 2065), including DataLazy::getSlice]

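// Worked illustration of the ambiguity (numbers are hypothetical): consider a
// lazy E + C where the expanded child stores 1000 samples of 4 values each
// (vector length 4000) while the constant child stores a single data point
// (vector length 4). "Length" could plausibly mean the length of either
// child's vector or the size of the data that would exist after resolution,
// so the method above throws instead of guessing.
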
// To do this we need to rely on our child nodes
DataTypes::RealVectorType::size_type
DataLazy::getPointOffset(int sampleNo,
                 int dataPointNo)
{
  if (m_op==IDENTITY)
  {
        return m_id->getPointOffset(sampleNo,dataPointNo);
  }
  if (m_readytype!='E')
  {
        collapse();
        return m_id->getPointOffset(sampleNo,dataPointNo);
  }
  // at this point we do not have an identity node and the expression will be Expanded
  // so we only need to know which child to ask
  if (m_left->m_readytype=='E')
  {
        return m_left->getPointOffset(sampleNo,dataPointNo);
  }
  else
  {
        return m_right->getPointOffset(sampleNo,dataPointNo);
  }
}

// To do this we need to rely on our child nodes
DataTypes::RealVectorType::size_type
DataLazy::getPointOffset(int sampleNo,
                 int dataPointNo) const
{
  if (m_op==IDENTITY)
  {
        return m_id->getPointOffset(sampleNo,dataPointNo);
  }
  if (m_readytype=='E')
  {
    // at this point we do not have an identity node and the expression will be Expanded
    // so we only need to know which child to ask
    if (m_left->m_readytype=='E')
    {
        return m_left->getPointOffset(sampleNo,dataPointNo);
    }
    else
    {
        return m_right->getPointOffset(sampleNo,dataPointNo);
    }
  }
  if (m_readytype=='C')
  {
        return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
  }
  throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
}

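// Illustration (hypothetical expression): for a lazy E + C node the expanded
// left child determines the sample layout of the eventual result, so
//
//   lazyNode->getPointOffset(sampleNo, dataPointNo)
//
// simply returns the offset reported by that expanded child. A fully constant
// or tagged expression is collapsed first in the non-const overload, while the
// const overload handles only the 'E' and 'C' cases directly and otherwise
// reports a programmer error (tagged data would require collapsing).
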
// I have decided to let Data:: handle this issue.
// (This replaces the earlier rationale, which read: "It would seem that
//  DataTagged will need to be treated differently since even after setting
//  all tags to zero, all the tags from all the DataTags would be in the
//  result. However since they all have the same value (0) whether they are
//  there or not should not matter. So I have decided that for all types this
//  method will create a constant 0. It can be promoted up as required. A
//  possible efficiency concern might be expanded->constant->expanded which
//  has an extra memory management but we can deal with that if it arises.")
void
DataLazy::setToZero()
{
//   DataTypes::RealVectorType v(getNoValues(),0);
//   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
//   m_op=IDENTITY;
//   m_right.reset();
//   m_left.reset();
//   m_readytype='C';

  (void)privdebug;  // to stop the compiler complaining about unused privdebug
  throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");
}
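
// Note (interpretation, not stated in the source beyond the comment above):
// since DataLazy nodes are meant to be read only, a caller such as
// Data::setToZero() is expected to replace or resolve the lazy object rather
// than mutate it; invoking this method directly now simply raises the
// DataException above.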

bool
DataLazy::actsExpanded() const
{
        return (m_readytype=='E');
}

} // end namespace

Removed from v.2147  
changed lines
  Added in v.6144

  ViewVC Help
Powered by ViewVC 1.1.26