/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


trunk/escript/src/DataLazy.cpp revision 2147 by jfenwick, Wed Dec 10 04:41:26 2008 UTC vs. trunk/escriptcore/src/DataLazy.cpp revision 6114 by jfenwick, Fri Apr 1 03:37:52 2016 UTC
# Line 1  Line 1 
1    
2  /*******************************************************  /*****************************************************************************
3  *  *
4  * Copyright (c) 2003-2008 by University of Queensland  * Copyright (c) 2003-2016 by The University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * http://www.uq.edu.au
 * http://www.uq.edu.au/esscc  
6  *  *
7  * Primary Business: Queensland, Australia  * Primary Business: Queensland, Australia
8  * Licensed under the Open Software License version 3.0  * Licensed under the Apache License, version 2.0
9  * http://www.opensource.org/licenses/osl-3.0.php  * http://www.apache.org/licenses/LICENSE-2.0
10  *  *
11  *******************************************************/  * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12    * Development 2012-2013 by School of Earth Sciences
13    * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14    *
15    *****************************************************************************/
16    
17  #include "DataLazy.h"  #include "DataLazy.h"
18    #include "Data.h"
19    #include "DataTypes.h"
20    #include "EscriptParams.h"
21    #include "FunctionSpace.h"
22    #include "Utils.h"
23    #include "DataVectorOps.h"
24    
25  #ifdef USE_NETCDF  #ifdef USE_NETCDF
26  #include <netcdfcpp.h>  #include <netcdfcpp.h>
27  #endif  #endif
 #ifdef PASO_MPI  
 #include <mpi.h>  
 #endif  
 #ifdef _OPENMP  
 #include <omp.h>  
 #endif  
 #include "FunctionSpace.h"  
 #include "DataTypes.h"  
 #include "Data.h"  
 #include "UnaryFuncs.h"     // for escript::fsign  
 #include "Utils.h"  
28    
29  // #define LAZYDEBUG(X) X;  #include <iomanip> // for some fancy formatting in debug
30    
31    using namespace escript::DataTypes;
32    
33    #define NO_ARG
34    
35    // #define LAZYDEBUG(X) if (privdebug){X;}
36  #define LAZYDEBUG(X)  #define LAZYDEBUG(X)
37    namespace
38    {
39    bool privdebug=false;
40    
41    #define ENABLEDEBUG privdebug=true;
42    #define DISABLEDEBUG privdebug=false;
43    }
44    
45    // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}
46    
47    // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "SIZE LIMIT EXCEEDED " << m_height << endl;resolveToIdentity();}
48    
49    
50    #define SIZELIMIT if (m_height>escript::escriptParams.getTOO_MANY_LEVELS())  {if (escript::escriptParams.getLAZY_VERBOSE()){cerr << "SIZE LIMIT EXCEEDED height=" << m_height << endl;}resolveToIdentity();}
51    
52  /*  /*
53  How does DataLazy work?  How does DataLazy work?
# Line 42  A special operation, IDENTITY, stores an Line 60  A special operation, IDENTITY, stores an
60  This means that all "internal" nodes in the structure are instances of DataLazy.  This means that all "internal" nodes in the structure are instances of DataLazy.
61    
62  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
63  Note that IDENITY is not considered a unary operation.  Note that IDENTITY is not considered a unary operation.
64    
65  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
66  It must however form a DAG (directed acyclic graph).  It must however form a DAG (directed acyclic graph).
# Line 50  I will refer to individual DataLazy obje Line 68  I will refer to individual DataLazy obje
68    
69  Each node also stores:  Each node also stores:
70  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
71      evaluated.          evaluated.
72  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
73      evaluate the expression.          evaluate the expression.
74  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
75    
76  When a new node is created, the above values are computed based on the values in the child nodes.  When a new node is created, the above values are computed based on the values in the child nodes.
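A minimal, self-contained sketch of the bookkeeping described above (hypothetical names such as LazyNode and makeBinary, not the classes in this file): the readytype and height of a new node are derived from its children, and reusing a child (c=a+a) yields a DAG rather than a tree.

#include <algorithm>
#include <memory>

struct LazyNode
{
    char op;                                // '+', 'I' for identity, ...
    char readytype;                         // 'C'onstant, 'T'agged or 'E'xpanded result
    int  height;                            // longest path down to a leaf
    std::shared_ptr<LazyNode> left, right;  // children; may be shared, hence a DAG
};

// Combining two nodes promotes the readytype ('E' beats 'T' beats 'C') and grows the height.
inline std::shared_ptr<LazyNode> makeBinary(char op,
        std::shared_ptr<LazyNode> l, std::shared_ptr<LazyNode> r)
{
    auto n = std::make_shared<LazyNode>();
    n->op = op;
    n->left = l;
    n->right = r;
    n->readytype = (l->readytype=='E' || r->readytype=='E') ? 'E'
                 : (l->readytype=='T' || r->readytype=='T') ? 'T' : 'C';
    n->height = std::max(l->height, r->height) + 1;
    return n;
}

// c = a + a reuses the same child twice, so the structure is a DAG, not a tree:
//   auto a = std::make_shared<LazyNode>(LazyNode{'I', 'E', 0, nullptr, nullptr});
//   auto c = makeBinary('+', a, a);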
# Line 93  namespace escript Line 111  namespace escript
111  namespace  namespace
112  {  {
113    
114    
115    // enabling this will print out when ever the maximum stacksize used by resolve increases
116    // it assumes _OPENMP is also in use
117    //#define LAZY_STACK_PROF
118    
119    
120    
121    #ifndef _OPENMP
122      #ifdef LAZY_STACK_PROF
123      #undef LAZY_STACK_PROF
124      #endif
125    #endif
126    
127    
128    #ifdef LAZY_STACK_PROF
129    std::vector<void*> stackstart(getNumberOfThreads());
130    std::vector<void*> stackend(getNumberOfThreads());
131    size_t maxstackuse=0;
132    #endif
133    
134  enum ES_opgroup  enum ES_opgroup
135  {  {
136     G_UNKNOWN,     G_UNKNOWN,
137     G_IDENTITY,     G_IDENTITY,
138     G_BINARY,        // pointwise operations with two arguments     G_BINARY,            // pointwise operations with two arguments
139     G_UNARY,     // pointwise operations with one argument     G_UNARY,             // pointwise operations with one argument
140     G_UNARY_P,       // pointwise operations with one argument, requiring a parameter     G_UNARY_P,           // pointwise operations with one argument, requiring a parameter
141     G_NP1OUT,        // non-pointwise op with one output     G_NP1OUT,            // non-pointwise op with one output
142     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter     G_NP1OUT_P,          // non-pointwise op with one output requiring a parameter
143     G_TENSORPROD     // general tensor product     G_TENSORPROD,        // general tensor product
144       G_NP1OUT_2P,         // non-pointwise op with one output requiring two params
145       G_REDUCTION,         // non-pointwise unary op with a scalar output
146       G_CONDEVAL
147  };  };
148    
149    
150    
151    
152  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",
153              "sin","cos","tan",                          "sin","cos","tan",
154              "asin","acos","atan","sinh","cosh","tanh","erf",                          "asin","acos","atan","sinh","cosh","tanh","erf",
155              "asinh","acosh","atanh",                          "asinh","acosh","atanh",
156              "log10","log","sign","abs","neg","pos","exp","sqrt",                          "log10","log","sign","abs","neg","pos","exp","sqrt",
157              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",                          "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",
158              "symmetric","nonsymmetric",                          "symmetric","antisymmetric",
159              "prod",                          "prod",
160              "transpose", "trace"};                          "transpose", "trace",
161  int ES_opcount=40;                          "swapaxes",
162                            "minval", "maxval",
163                            "condEval"};
164    int ES_opcount=44;
165  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
166              G_UNARY,G_UNARY,G_UNARY, //10                          G_UNARY,G_UNARY,G_UNARY, //10
167              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 17
168              G_UNARY,G_UNARY,G_UNARY,                    // 20                          G_UNARY,G_UNARY,G_UNARY,                                        // 20
169              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 28                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28
170              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35                          G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,          // 35
171              G_NP1OUT,G_NP1OUT,                          G_NP1OUT,G_NP1OUT,
172              G_TENSORPROD,                          G_TENSORPROD,
173              G_NP1OUT_P, G_NP1OUT_P};                          G_NP1OUT_P, G_NP1OUT_P,
174                            G_NP1OUT_2P,
175                            G_REDUCTION, G_REDUCTION,
176                            G_CONDEVAL};
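Reading the two tables above in parallel gives the operator-to-group mapping, for example (an illustrative reading of the arrays, no code change implied):

// "+"                          -> G_BINARY
// "sin"                        -> G_UNARY
// "where<>0", "where=0"        -> G_UNARY_P      (carry a tolerance parameter)
// "symmetric", "antisymmetric" -> G_NP1OUT
// "prod"                       -> G_TENSORPROD
// "transpose", "trace"         -> G_NP1OUT_P
// "swapaxes"                   -> G_NP1OUT_2P
// "minval", "maxval"           -> G_REDUCTION
// "condEval"                   -> G_CONDEVAL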
177  inline  inline
178  ES_opgroup  ES_opgroup
179  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 138  getOpgroup(ES_optype op) Line 185  getOpgroup(ES_optype op)
185  FunctionSpace  FunctionSpace
186  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
187  {  {
188      // perhaps this should call interpolate and throw or something?          // perhaps this should call interpolate and throw or something?
189      // maybe we need an interpolate node -          // maybe we need an interpolate node -
190      // that way, if interpolate is required in any other op we can just throw a          // that way, if interpolate is required in any other op we can just throw a
191      // programming error exception.          // programming error exception.
192    
193    FunctionSpace l=left->getFunctionSpace();    FunctionSpace l=left->getFunctionSpace();
194    FunctionSpace r=right->getFunctionSpace();    FunctionSpace r=right->getFunctionSpace();
195    if (l!=r)    if (l!=r)
196    {    {
197      if (r.probeInterpolation(l))      signed char res=r.getDomain()->preferredInterpolationOnDomain(r.getTypeCode(), l.getTypeCode());
198        if (res==1)
199      {      {
200      return l;          return l;
201      }      }
202      if (l.probeInterpolation(r))      if (res==-1)
203      {      {
204      return r;          return r;
205      }      }
206      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");      throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
207    }    }
# Line 165  resultFS(DataAbstract_ptr left, DataAbst Line 213  resultFS(DataAbstract_ptr left, DataAbst
213  DataTypes::ShapeType  DataTypes::ShapeType
214  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
215  {  {
216      if (left->getShape()!=right->getShape())          if (left->getShape()!=right->getShape())
217      {          {
218        if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))            if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
219        {            {
220          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");                  throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
221        }            }
222        if (left->getRank()==0)   // we need to allow scalar * anything  
223        {            if (left->getRank()==0)       // we need to allow scalar * anything
224          return right->getShape();            {
225        }                  return right->getShape();
226        if (right->getRank()==0)            }
227        {            if (right->getRank()==0)
228          return left->getShape();            {
229        }                  return left->getShape();
230        throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");            }
231      }            throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
232      return left->getShape();          }
233            return left->getShape();
234  }  }
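A few worked examples of the rule above (illustrative shapes only):

// resultShape(scalar, (3,3), ADD)  -> (3,3)   left has rank 0, so the right shape is used
// resultShape((3,3), scalar, MUL)  -> (3,3)   right has rank 0, so the left shape is used
// resultShape((2,),  (3,3), ADD)   -> throws: non-scalar shapes must match exactly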
235    
236  // return the shape for "op left"  // return the shape for "op left"
237    
238  DataTypes::ShapeType  DataTypes::ShapeType
239  resultShape(DataAbstract_ptr left, ES_optype op)  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)
240    {
241            switch(op)
242            {
243            case TRANS:
244               {                    // for the scoping of variables
245                    const DataTypes::ShapeType& s=left->getShape();
246                    DataTypes::ShapeType sh;
247                    int rank=left->getRank();
248                    if (axis_offset<0 || axis_offset>rank)
249                    {
250                stringstream e;
251                e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
252                throw DataException(e.str());
253            }
254            for (int i=0; i<rank; i++)
255                    {
256                       int index = (axis_offset+i)%rank;
257               sh.push_back(s[index]); // Append to new shape
258            }
259                    return sh;
260               }
261            break;
262            case TRACE:
263               {
264                    int rank=left->getRank();
265                    if (rank<2)
266                    {
267                       throw DataException("Trace can only be computed for objects with rank 2 or greater.");
268                    }
269                    if ((axis_offset>rank-2) || (axis_offset<0))
270                    {
271                       throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");
272                    }
273                    if (rank==2)
274                    {
275                       return DataTypes::scalarShape;
276                    }
277                    else if (rank==3)
278                    {
279                       DataTypes::ShapeType sh;
280                       if (axis_offset==0)
281                       {
282                            sh.push_back(left->getShape()[2]);
283                       }
284                       else         // offset==1
285                       {
286                            sh.push_back(left->getShape()[0]);
287                       }
288                       return sh;
289                    }
290                    else if (rank==4)
291                    {
292                       DataTypes::ShapeType sh;
293                       const DataTypes::ShapeType& s=left->getShape();
294                       if (axis_offset==0)
295                       {
296                            sh.push_back(s[2]);
297                            sh.push_back(s[3]);
298                       }
299                       else if (axis_offset==1)
300                       {
301                            sh.push_back(s[0]);
302                            sh.push_back(s[3]);
303                       }
304                       else         // offset==2
305                       {
306                            sh.push_back(s[0]);
307                            sh.push_back(s[1]);
308                       }
309                       return sh;
310                    }
311                    else            // unknown rank
312                    {
313                       throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
314                    }
315               }
316            break;
317            default:
318            throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
319            }
320    }
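Worked examples for the two cases handled above (illustrative shapes only):

// TRANS: shape (2,3,4),   axis_offset=1 -> (3,4,2)   axes are read starting at axis_offset and wrap around
// TRACE: shape (5,5),     axis_offset=0 -> ()        rank 2 collapses to a scalar
// TRACE: shape (2,2,7),   axis_offset=0 -> (7)       the two traced axes are removed
// TRACE: shape (2,3,3,4), axis_offset=1 -> (2,4)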
321    
322    DataTypes::ShapeType
323    SwapShape(DataAbstract_ptr left, const int axis0, const int axis1)
324  {  {
325      switch(op)       // This code taken from the Data.cpp swapaxes() method
326      {       // Some of the checks are probably redundant here
327          case TRANS:       int axis0_tmp,axis1_tmp;
328          return left->getShape();       const DataTypes::ShapeType& s=left->getShape();
329      break;       DataTypes::ShapeType out_shape;
330      case TRACE:       // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
331          return DataTypes::scalarShape;       // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
332      break;       int rank=left->getRank();
333          default:       if (rank<2) {
334      throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
335      }       }
336         if (axis0<0 || axis0>rank-1) {
337            stringstream e;
338            e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
339            throw DataException(e.str());
340         }
341         if (axis1<0 || axis1>rank-1) {
342            stringstream e;
343            e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
344            throw DataException(e.str());
345         }
346         if (axis0 == axis1) {
347             throw DataException("Error - Data::swapaxes: axis indices must be different.");
348         }
349         if (axis0 > axis1) {
350             axis0_tmp=axis1;
351             axis1_tmp=axis0;
352         } else {
353             axis0_tmp=axis0;
354             axis1_tmp=axis1;
355         }
356         for (int i=0; i<rank; i++) {
357           if (i == axis0_tmp) {
358              out_shape.push_back(s[axis1_tmp]);
359           } else if (i == axis1_tmp) {
360              out_shape.push_back(s[axis0_tmp]);
361           } else {
362              out_shape.push_back(s[i]);
363           }
364         }
365        return out_shape;
366  }  }
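For example (illustrative shapes only):

// SwapShape((2,3,4), axis0=0, axis1=2) -> (4,3,2)
// SwapShape((2,3,4), axis0=1, axis1=0) -> (3,2,4)   the order of axis0/axis1 does not matter
// SwapShape((5,),    axis0=0, axis1=0) -> throws    (rank must be at least 2 and the axes must differ)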
367    
368    
369  // determine the output shape for the general tensor product operation  // determine the output shape for the general tensor product operation
370  // the additional parameters return information required later for the product  // the additional parameters return information required later for the product
371  // the majority of this code is copy pasted from C_General_Tensor_Product  // the majority of this code is copy pasted from C_General_Tensor_Product
372  DataTypes::ShapeType  DataTypes::ShapeType
373  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)  GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
374  {  {
375                
376    // Get rank and shape of inputs    // Get rank and shape of inputs
377    int rank0 = left->getRank();    int rank0 = left->getRank();
378    int rank1 = right->getRank();    int rank1 = right->getRank();
# Line 217  GTPShape(DataAbstract_ptr left, DataAbst Line 381  GTPShape(DataAbstract_ptr left, DataAbst
381    
382    // Prepare for the loops of the product and verify compatibility of shapes    // Prepare for the loops of the product and verify compatibility of shapes
383    int start0=0, start1=0;    int start0=0, start1=0;
384    if (transpose == 0)       {}    if (transpose == 0)           {}
385    else if (transpose == 1)  { start0 = axis_offset; }    else if (transpose == 1)      { start0 = axis_offset; }
386    else if (transpose == 2)  { start1 = rank1-axis_offset; }    else if (transpose == 2)      { start1 = rank1-axis_offset; }
387    else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }    else                          { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
388    
389    if (rank0<axis_offset)    if (rank0<axis_offset)
390    {    {
391      throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");          throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
392    }    }
393    
394    // Adjust the shapes for transpose    // Adjust the shapes for transpose
395    DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather    DataTypes::ShapeType tmpShape0(rank0);        // pre-sizing the vectors rather
396    DataTypes::ShapeType tmpShape1(rank1);    // than using push_back    DataTypes::ShapeType tmpShape1(rank1);        // than using push_back
397    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }    for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
398    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }    for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }
399    
400    // Prepare for the loops of the product    // Prepare for the loops of the product
401    SL=1, SM=1, SR=1;    SL=1, SM=1, SR=1;
402    for (int i=0; i<rank0-axis_offset; i++)   {    for (int i=0; i<rank0-axis_offset; i++)       {
403      SL *= tmpShape0[i];      SL *= tmpShape0[i];
404    }    }
405    for (int i=rank0-axis_offset; i<rank0; i++)   {    for (int i=rank0-axis_offset; i<rank0; i++)   {
406      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {      if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
407        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");        throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
408      }      }
409      SM *= tmpShape0[i];      SM *= tmpShape0[i];
410    }    }
411    for (int i=axis_offset; i<rank1; i++)     {    for (int i=axis_offset; i<rank1; i++)         {
412      SR *= tmpShape1[i];      SR *= tmpShape1[i];
413    }    }
414    
415    // Define the shape of the output (rank of shape is the sum of the loop ranges below)    // Define the shape of the output (rank of shape is the sum of the loop ranges below)
416    DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);      
417    {         // block to limit the scope of out_index    {                     // block to limit the scope of out_index
418       int out_index=0;       int out_index=0;
419       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
420       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
# Line 266  GTPShape(DataAbstract_ptr left, DataAbst Line 430  GTPShape(DataAbstract_ptr left, DataAbst
430    return shape2;    return shape2;
431  }  }
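A worked example of the output shape and the SL/SM/SR loop sizes computed above (illustrative shapes only, transpose=0 so neither argument is rotated):

// left shape (2,3), right shape (3,4), axis_offset=1:
//   SL = 2          product of the free (leading) axes of left
//   SM = 3          product of the axis_offset summed axes (must match on both operands)
//   SR = 4          product of the free (trailing) axes of right
//   result = (2,4)  i.e. an ordinary matrix-matrix product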
432    
433    }       // end anonymous namespace
 // determine the number of points in the result of "left op right"  
 // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here  
 // size_t  
 // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  
 // {  
 //    switch (getOpgroup(op))  
 //    {  
 //    case G_BINARY: return left->getLength();  
 //    case G_UNARY: return left->getLength();  
 //    case G_NP1OUT: return left->getLength();  
 //    default:  
 //  throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");  
 //    }  
 // }  
   
 // determine the number of samples requires to evaluate an expression combining left and right  
 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  
 // The same goes for G_TENSORPROD  
 int  
 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  
 {  
    switch(getOpgroup(op))  
    {  
    case G_IDENTITY: return 1;  
    case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_UNARY:  
    case G_UNARY_P: return max(left->getBuffsRequired(),1);  
    case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);  
    case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);  
    case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    default:  
     throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");  
    }  
 }  
   
   
 }   // end anonymous namespace  
434    
435    
436    
# Line 318  opToString(ES_optype op) Line 445  opToString(ES_optype op)
445    return ES_opstrings[op];    return ES_opstrings[op];
446  }  }
447    
448    void DataLazy::LazyNodeSetup()
449    {
450    #ifdef _OPENMP
451        int numthreads=omp_get_max_threads();
452        m_samples.resize(numthreads*m_samplesize);
453        m_sampleids=new int[numthreads];
454        for (int i=0;i<numthreads;++i)
455        {
456            m_sampleids[i]=-1;  
457        }
458    #else
459        m_samples.resize(m_samplesize);
460        m_sampleids=new int[1];
461        m_sampleids[0]=-1;
462    #endif  // _OPENMP
463    }
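The per-thread buffers set up above are addressed with a fixed pattern in the resolveNode* methods further down (a sketch of that pattern only; tid is the OpenMP thread number, or 0 without OpenMP):

// size_t roffset = tid * m_samplesize;     // start of this thread's slice of m_samples
// double* result = &(m_samples[roffset]);  // the resolved sample is written here
// m_sampleids[tid] = sampleNo;             // records which sample the slice holds, so
//                                          // resolveNodeSample can return it without recomputing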
464    
465    
466    // Creates an identity node
467  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
468      : parent(p->getFunctionSpace(),p->getShape()),          : parent(p->getFunctionSpace(),p->getShape())
469      m_op(IDENTITY),          ,m_sampleids(0),
470      m_axis_offset(0),          m_samples(1)
     m_transpose(0),  
     m_SL(0), m_SM(0), m_SR(0)  
471  {  {
472     if (p->isLazy())     if (p->isLazy())
473     {     {
474      // I don't want identity of Lazy.          // I don't want identity of Lazy.
475      // Question: Why would that be so bad?          // Question: Why would that be so bad?
476      // Answer: We assume that the child of ID is something we can call getVector on          // Answer: We assume that the child of ID is something we can call getVector on
477      throw DataException("Programmer error - attempt to create identity from a DataLazy.");          throw DataException("Programmer error - attempt to create identity from a DataLazy.");
478     }     }
479     else     else
480     {     {
481      m_id=dynamic_pointer_cast<DataReady>(p);          DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);
482      if(p->isConstant()) {m_readytype='C';}          makeIdentity(dr);
483      else if(p->isExpanded()) {m_readytype='E';}  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)
     else if (p->isTagged()) {m_readytype='T';}  
     else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}  
484     }     }
    m_buffsRequired=1;  
    m_samplesize=getNumDPPSample()*getNoValues();  
    m_maxsamplesize=m_samplesize;  
485  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
486  }  }
487    
   
   
   
488  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
489      : parent(left->getFunctionSpace(),left->getShape()),          : parent(left->getFunctionSpace(),(getOpgroup(op)!=G_REDUCTION)?left->getShape():DataTypes::scalarShape),
490      m_op(op),          m_op(op),
491      m_axis_offset(0),          m_axis_offset(0),
492      m_transpose(0),          m_transpose(0),
493      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
494  {  {
495     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT) && (getOpgroup(op)!=G_REDUCTION))
496     {     {
497      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
498     }     }
499    
500     DataLazy_ptr lleft;     DataLazy_ptr lleft;
501     if (!left->isLazy())     if (!left->isLazy())
502     {     {
503      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
504     }     }
505     else     else
506     {     {
507      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
508     }     }
509     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
510     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
511     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
512     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
513       m_height=m_left->m_height+1;
514       LazyNodeSetup();
515       SIZELIMIT
516  }  }
517    
518    
519  // In this constructor we need to consider interpolation  // In this constructor we need to consider interpolation
520  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
521      : parent(resultFS(left,right,op), resultShape(left,right,op)),          : parent(resultFS(left,right,op), resultShape(left,right,op)),
522      m_op(op),          m_op(op),
523      m_SL(0), m_SM(0), m_SR(0)          m_SL(0), m_SM(0), m_SR(0)
524  {  {
525    LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)
526     if ((getOpgroup(op)!=G_BINARY))     if ((getOpgroup(op)!=G_BINARY))
527     {     {
528      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");          throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
529     }     }
530    
531     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
532     {     {
533      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
534      Data ltemp(left);          Data ltemp(left);
535      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
536      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
537     }     }
538     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
539     {     {
540      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
541      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
542    LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)
543     }     }
544     left->operandCheck(*right);     left->operandCheck(*right);
545    
546     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
547     {     {
548      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
549    LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)
550     }     }
551     else     else
552     {     {
553      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
554    LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)
555     }     }
556     if (right->isLazy())     if (right->isLazy())
557     {     {
558      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
559    LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)
560     }     }
561     else     else
562     {     {
563      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
564    LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)
565     }     }
566     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
567     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
568     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
569     {     {
570      m_readytype='E';          m_readytype='E';
571     }     }
572     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
573     {     {
574      m_readytype='T';          m_readytype='T';
575     }     }
576     else     else
577     {     {
578      m_readytype='C';          m_readytype='C';
579     }     }
580     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
581     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_children=m_left->m_children+m_right->m_children+2;
582     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_height=max(m_left->m_height,m_right->m_height)+1;
583       LazyNodeSetup();
584       SIZELIMIT
585  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
586  }  }
587    
588  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
589      : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),          : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
590      m_op(op),          m_op(op),
591      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
592      m_transpose(transpose)          m_transpose(transpose)
593  {  {
594     if ((getOpgroup(op)!=G_TENSORPROD))     if ((getOpgroup(op)!=G_TENSORPROD))
595     {     {
596      throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
597     }     }
598     if ((transpose>2) || (transpose<0))     if ((transpose>2) || (transpose<0))
599     {     {
600      throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");          throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
601     }     }
602     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated     if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
603     {     {
604      FunctionSpace fs=getFunctionSpace();          FunctionSpace fs=getFunctionSpace();
605      Data ltemp(left);          Data ltemp(left);
606      Data tmp(ltemp,fs);          Data tmp(ltemp,fs);
607      left=tmp.borrowDataPtr();          left=tmp.borrowDataPtr();
608     }     }
609     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
610     {     {
611      Data tmp(Data(right),getFunctionSpace());          Data tmp(Data(right),getFunctionSpace());
612      right=tmp.borrowDataPtr();          right=tmp.borrowDataPtr();
613     }     }
614     left->operandCheck(*right);  //    left->operandCheck(*right);
615    
616     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())                  // the children need to be DataLazy. Wrap them in IDENTITY if required
617     {     {
618      m_left=dynamic_pointer_cast<DataLazy>(left);          m_left=dynamic_pointer_cast<DataLazy>(left);
619     }     }
620     else     else
621     {     {
622      m_left=DataLazy_ptr(new DataLazy(left));          m_left=DataLazy_ptr(new DataLazy(left));
623     }     }
624     if (right->isLazy())     if (right->isLazy())
625     {     {
626      m_right=dynamic_pointer_cast<DataLazy>(right);          m_right=dynamic_pointer_cast<DataLazy>(right);
627     }     }
628     else     else
629     {     {
630      m_right=DataLazy_ptr(new DataLazy(right));          m_right=DataLazy_ptr(new DataLazy(right));
631     }     }
632     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
633     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
634     if (lt=='E' || rt=='E')     if (lt=='E' || rt=='E')
635     {     {
636      m_readytype='E';          m_readytype='E';
637     }     }
638     else if (lt=='T' || rt=='T')     else if (lt=='T' || rt=='T')
639     {     {
640      m_readytype='T';          m_readytype='T';
641     }     }
642     else     else
643     {     {
644      m_readytype='C';          m_readytype='C';
645     }     }
646     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
647     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_children=m_left->m_children+m_right->m_children+2;
648     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_height=max(m_left->m_height,m_right->m_height)+1;
649       LazyNodeSetup();
650       SIZELIMIT
651  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
652  }  }
653    
654    
655  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
656      : parent(left->getFunctionSpace(), resultShape(left,op)),          : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),
657      m_op(op),          m_op(op),
658      m_axis_offset(axis_offset),          m_axis_offset(axis_offset),
659      m_transpose(0),          m_transpose(0),
660      m_tol(0)          m_tol(0)
661  {  {
662     if ((getOpgroup(op)!=G_NP1OUT_P))     if ((getOpgroup(op)!=G_NP1OUT_P))
663     {     {
664      throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
665     }     }
666     DataLazy_ptr lleft;     DataLazy_ptr lleft;
667     if (!left->isLazy())     if (!left->isLazy())
668     {     {
669      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
670     }     }
671     else     else
672     {     {
673      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
674     }     }
675     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
676     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
677     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
678     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
679       m_height=m_left->m_height+1;
680       LazyNodeSetup();
681       SIZELIMIT
682  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
683  }  }
684    
685  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)
686      : parent(left->getFunctionSpace(), left->getShape()),          : parent(left->getFunctionSpace(), left->getShape()),
687      m_op(op),          m_op(op),
688      m_axis_offset(0),          m_axis_offset(0),
689      m_transpose(0),          m_transpose(0),
690      m_tol(tol)          m_tol(tol)
691  {  {
692     if ((getOpgroup(op)!=G_UNARY_P))     if ((getOpgroup(op)!=G_UNARY_P))
693     {     {
694      throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");          throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");
695     }     }
696     DataLazy_ptr lleft;     DataLazy_ptr lleft;
697     if (!left->isLazy())     if (!left->isLazy())
698     {     {
699      lleft=DataLazy_ptr(new DataLazy(left));          lleft=DataLazy_ptr(new DataLazy(left));
700     }     }
701     else     else
702     {     {
703      lleft=dynamic_pointer_cast<DataLazy>(left);          lleft=dynamic_pointer_cast<DataLazy>(left);
704     }     }
705     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
706     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
707     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
708     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
709       m_height=m_left->m_height+1;
710       LazyNodeSetup();
711       SIZELIMIT
712  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
713  }  }
714    
715  DataLazy::~DataLazy()  
716    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)
717            : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),
718            m_op(op),
719            m_axis_offset(axis0),
720            m_transpose(axis1),
721            m_tol(0)
722  {  {
723       if ((getOpgroup(op)!=G_NP1OUT_2P))
724       {
725            throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require two integer parameters.");
726       }
727       DataLazy_ptr lleft;
728       if (!left->isLazy())
729       {
730            lleft=DataLazy_ptr(new DataLazy(left));
731       }
732       else
733       {
734            lleft=dynamic_pointer_cast<DataLazy>(left);
735       }
736       m_readytype=lleft->m_readytype;
737       m_left=lleft;
738       m_samplesize=getNumDPPSample()*getNoValues();
739       m_children=m_left->m_children+1;
740       m_height=m_left->m_height+1;
741       LazyNodeSetup();
742       SIZELIMIT
743    LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)
744  }  }
745    
746    
747  int  namespace
 DataLazy::getBuffsRequired() const  
748  {  {
749      return m_buffsRequired;  
750        inline int max3(int a, int b, int c)
751        {
752            int t=(a>b?a:b);
753            return (t>c?t:c);
754    
755        }
756  }  }
757    
758    DataLazy::DataLazy(DataAbstract_ptr mask, DataAbstract_ptr left, DataAbstract_ptr right/*, double tol*/)
759            : parent(left->getFunctionSpace(), left->getShape()),
760            m_op(CONDEVAL),
761            m_axis_offset(0),
762            m_transpose(0),
763            m_tol(0)
764    {
765    
766  size_t     DataLazy_ptr lmask;
767  DataLazy::getMaxSampleSize() const     DataLazy_ptr lleft;
768       DataLazy_ptr lright;
769       if (!mask->isLazy())
770       {
771            lmask=DataLazy_ptr(new DataLazy(mask));
772       }
773       else
774       {
775            lmask=dynamic_pointer_cast<DataLazy>(mask);
776       }
777       if (!left->isLazy())
778       {
779            lleft=DataLazy_ptr(new DataLazy(left));
780       }
781       else
782       {
783            lleft=dynamic_pointer_cast<DataLazy>(left);
784       }
785       if (!right->isLazy())
786       {
787            lright=DataLazy_ptr(new DataLazy(right));
788       }
789       else
790       {
791            lright=dynamic_pointer_cast<DataLazy>(right);
792       }
793       m_readytype=lmask->m_readytype;
794       if ((lleft->m_readytype!=lright->m_readytype) || (lmask->m_readytype!=lleft->m_readytype))
795       {
796            throw DataException("Programmer Error - condEval arguments must have the same readytype");
797       }
798       m_left=lleft;
799       m_right=lright;
800       m_mask=lmask;
801       m_samplesize=getNumDPPSample()*getNoValues();
802       m_children=m_left->m_children+m_right->m_children+m_mask->m_children+1;
803       m_height=max3(m_left->m_height,m_right->m_height,m_mask->m_height)+1;
804       LazyNodeSetup();
805       SIZELIMIT
806    LAZYDEBUG(cout << "(8)Lazy created with " << m_samplesize << endl;)
807    }
808    
809    
810    
811    DataLazy::~DataLazy()
812  {  {
813      return m_maxsamplesize;     delete[] m_sampleids;
814  }  }
815    
816    
817  /*  /*
818    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
819    This does the work for the collapse method.    This does the work for the collapse method.
820    For reasons of efficiency do not call this method on DataExpanded nodes.    For reasons of efficiency do not call this method on DataExpanded nodes.
821  */  */
822  DataReady_ptr  DataReady_ptr
823  DataLazy::collapseToReady()  DataLazy::collapseToReady() const
824  {  {
825    if (m_readytype=='E')    if (m_readytype=='E')
826    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
827      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
828    }    }
829    if (m_op==IDENTITY)    if (m_op==IDENTITY)
# Line 606  DataLazy::collapseToReady() Line 841  DataLazy::collapseToReady()
841    switch(m_op)    switch(m_op)
842    {    {
843      case ADD:      case ADD:
844      result=left+right;          result=left+right;
845      break;          break;
846      case SUB:            case SUB:          
847      result=left-right;          result=left-right;
848      break;          break;
849      case MUL:            case MUL:          
850      result=left*right;          result=left*right;
851      break;          break;
852      case DIV:            case DIV:          
853      result=left/right;          result=left/right;
854      break;          break;
855      case SIN:      case SIN:
856      result=left.sin();            result=left.sin();      
857      break;          break;
858      case COS:      case COS:
859      result=left.cos();          result=left.cos();
860      break;          break;
861      case TAN:      case TAN:
862      result=left.tan();          result=left.tan();
863      break;          break;
864      case ASIN:      case ASIN:
865      result=left.asin();          result=left.asin();
866      break;          break;
867      case ACOS:      case ACOS:
868      result=left.acos();          result=left.acos();
869      break;          break;
870      case ATAN:      case ATAN:
871      result=left.atan();          result=left.atan();
872      break;          break;
873      case SINH:      case SINH:
874      result=left.sinh();          result=left.sinh();
875      break;          break;
876      case COSH:      case COSH:
877      result=left.cosh();          result=left.cosh();
878      break;          break;
879      case TANH:      case TANH:
880      result=left.tanh();          result=left.tanh();
881      break;          break;
882      case ERF:      case ERF:
883      result=left.erf();          result=left.erf();
884      break;          break;
885     case ASINH:     case ASINH:
886      result=left.asinh();          result=left.asinh();
887      break;          break;
888     case ACOSH:     case ACOSH:
889      result=left.acosh();          result=left.acosh();
890      break;          break;
891     case ATANH:     case ATANH:
892      result=left.atanh();          result=left.atanh();
893      break;          break;
894      case LOG10:      case LOG10:
895      result=left.log10();          result=left.log10();
896      break;          break;
897      case LOG:      case LOG:
898      result=left.log();          result=left.log();
899      break;          break;
900      case SIGN:      case SIGN:
901      result=left.sign();          result=left.sign();
902      break;          break;
903      case ABS:      case ABS:
904      result=left.abs();          result=left.abs();
905      break;          break;
906      case NEG:      case NEG:
907      result=left.neg();          result=left.neg();
908      break;          break;
909      case POS:      case POS:
910      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
911      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
912      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
913      break;          break;
914      case EXP:      case EXP:
915      result=left.exp();          result=left.exp();
916      break;          break;
917      case SQRT:      case SQRT:
918      result=left.sqrt();          result=left.sqrt();
919      break;          break;
920      case RECIP:      case RECIP:
921      result=left.oneOver();          result=left.oneOver();
922      break;          break;
923      case GZ:      case GZ:
924      result=left.wherePositive();          result=left.wherePositive();
925      break;          break;
926      case LZ:      case LZ:
927      result=left.whereNegative();          result=left.whereNegative();
928      break;          break;
929      case GEZ:      case GEZ:
930      result=left.whereNonNegative();          result=left.whereNonNegative();
931      break;          break;
932      case LEZ:      case LEZ:
933      result=left.whereNonPositive();          result=left.whereNonPositive();
934      break;          break;
935      case NEZ:      case NEZ:
936      result=left.whereNonZero(m_tol);          result=left.whereNonZero(m_tol);
937      break;          break;
938      case EZ:      case EZ:
939      result=left.whereZero(m_tol);          result=left.whereZero(m_tol);
940      break;          break;
941      case SYM:      case SYM:
942      result=left.symmetric();          result=left.symmetric();
943      break;          break;
944      case NSYM:      case NSYM:
945      result=left.nonsymmetric();          result=left.antisymmetric();
946      break;          break;
947      case PROD:      case PROD:
948      result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);          result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
949      break;          break;
950      case TRANS:      case TRANS:
951      result=left.transpose(m_axis_offset);          result=left.transpose(m_axis_offset);
952      break;          break;
953      case TRACE:      case TRACE:
954      result=left.trace(m_axis_offset);          result=left.trace(m_axis_offset);
955      break;          break;
956        case SWAP:
957            result=left.swapaxes(m_axis_offset, m_transpose);
958            break;
959        case MINVAL:
960            result=left.minval();
961            break;
962        case MAXVAL:
963          result=left.maxval();
964            break;
965      default:      default:
966      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
967    }    }
968    return result.borrowReadyPtr();    return result.borrowReadyPtr();
969  }  }
# Line 731  DataLazy::collapseToReady() Line 975  DataLazy::collapseToReady()
975     the purpose of using DataLazy in the first place).     the purpose of using DataLazy in the first place).
976  */  */
977  void  void
978  DataLazy::collapse()  DataLazy::collapse() const
979  {  {
980    if (m_op==IDENTITY)    if (m_op==IDENTITY)
981    {    {
982      return;          return;
983    }    }
984    if (m_readytype=='E')    if (m_readytype=='E')
985    { // this is more an efficiency concern than anything else    {     // this is more an efficiency concern than anything else
986      throw DataException("Programmer Error - do not use collapse on Expanded data.");      throw DataException("Programmer Error - do not use collapse on Expanded data.");
987    }    }
988    m_id=collapseToReady();    m_id=collapseToReady();
989    m_op=IDENTITY;    m_op=IDENTITY;
990  }  }
991    
992  /*  // The result will be stored in m_samples
993    \brief Compute the value of the expression (unary operation) for the given sample.  // The return value is a pointer to the DataVector, offset is the offset within the return value
994    \return Vector which stores the value of the subexpression for the given sample.  const DataTypes::RealVectorType*
995    \param v A vector to store intermediate results.  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset) const
996    \param offset Index in v to begin storing results.  {
997    \param sampleNo Sample number to evaluate.  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
998    \param roffset (output parameter) the offset in the return vector where the result begins.          // collapse so we have a 'E' node or an IDENTITY for some other type
999      if (m_readytype!='E' && m_op!=IDENTITY)
1000    The return value will be an existing vector so do not deallocate it.    {
1001    If the result is stored in v it should be stored at the offset given.          collapse();
1002    Everything from offset to the end of v should be considered available for this method to use.    }
1003  */    if (m_op==IDENTITY)  
1004  DataTypes::ValueType*    {
1005  DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const      const RealVectorType& vec=m_id->getVectorRO();
1006        roffset=m_id->getPointOffset(sampleNo, 0);
1007    #ifdef LAZY_STACK_PROF
1008    int x;
1009    if (&x<stackend[omp_get_thread_num()])
1010  {  {
1011      // we assume that any collapsing has been done before we get here         stackend[omp_get_thread_num()]=&x;
1012      // since we only have one argument we don't need to think about only  }
1013      // processing single points.  #endif
1014        return &(vec);
1015      }
1016      if (m_readytype!='E')
1017      {
1018        throw DataException("Programmer Error - Collapse did not produce an expanded node.");
1019      }
1020      if (m_sampleids[tid]==sampleNo)
1021      {
1022            roffset=tid*m_samplesize;
1023            return &(m_samples);            // sample is already resolved
1024      }
1025      m_sampleids[tid]=sampleNo;
1026    
1027      switch (getOpgroup(m_op))
1028      {
1029      case G_UNARY:
1030      case G_UNARY_P: return resolveNodeUnary(tid, sampleNo, roffset);
1031      case G_BINARY: return resolveNodeBinary(tid, sampleNo, roffset);
1032      case G_NP1OUT: return resolveNodeNP1OUT(tid, sampleNo, roffset);
1033      case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);
1034      case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);
1035      case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);
1036      case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);
1037      case G_CONDEVAL: return resolveNodeCondEval(tid, sampleNo, roffset);
1038      default:
1039        throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1040      }
1041    }
1042    
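resolveNodeSample caches the most recently resolved sample per thread (via m_sampleids and a per-thread slice of m_samples) and then dispatches on the operation group. The following standalone sketch shows only the per-thread cache idea; the SampleCache class and computeSample function are illustrative names, not escript code.

    #include <cstddef>
    #include <vector>

    // Illustrative per-thread cache of the last resolved sample.
    class SampleCache {
    public:
        SampleCache(int nthreads, std::size_t samplesize)
            : m_samplesize(samplesize),
              m_samples(nthreads * samplesize, 0.0),
              m_sampleids(nthreads, -1) {}

        // Return a pointer to the resolved sample, recomputing only on a cache miss.
        const double* resolve(int tid, int sampleNo) {
            double* slot = &m_samples[tid * m_samplesize];
            if (m_sampleids[tid] == sampleNo)
                return slot;                     // this thread already holds the sample
            computeSample(sampleNo, slot);       // placeholder for the real evaluation
            m_sampleids[tid] = sampleNo;
            return slot;
        }

    private:
        void computeSample(int sampleNo, double* out) {
            for (std::size_t i = 0; i < m_samplesize; ++i)
                out[i] = sampleNo + 0.1 * i;     // dummy workload
        }

        std::size_t m_samplesize;
        std::vector<double> m_samples;           // one block of m_samplesize per thread
        std::vector<int> m_sampleids;            // which sample each thread holds
    };

    int main() {
        SampleCache cache(4, 8);
        const double* s = cache.resolve(/*tid=*/0, /*sampleNo=*/3);
        (void)s;
    }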
1043    const DataTypes::RealVectorType*
1044    DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset) const
1045    {
1046            // we assume that any collapsing has been done before we get here
1047            // since we only have one argument we don't need to think about only
1048            // processing single points.
1049            // we will also know we won't get identity nodes
1050    if (m_readytype!='E')    if (m_readytype!='E')
1051    {    {
1052      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
1053    }    }
1054    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);    if (m_op==IDENTITY)
1055    const double* left=&((*vleft)[roffset]);    {
1056    double* result=&(v[offset]);      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");
1057    roffset=offset;    }
1058      const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);
1059      const double* left=&((*leftres)[roffset]);
1060      roffset=m_samplesize*tid;
1061      double* result=&(m_samples[roffset]);
1062      escript::ESFunction operation=SINF;
1063    switch (m_op)    switch (m_op)
1064    {    {
1065      case SIN:        case SIN:
1066      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);      operation=SINF;
1067      break;      break;
1068      case COS:      case COS:
1069      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);          operation=COSF;
1070      break;      break;
1071      case TAN:      case TAN:
1072      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);          operation=TANF;
1073      break;      break;
1074      case ASIN:      case ASIN:
1075      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);          operation=ASINF;
1076      break;      break;
1077      case ACOS:      case ACOS:
1078      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);          operation=ACOSF;
1079      break;      break;
1080      case ATAN:      case ATAN:
1081      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);          operation=ATANF;
1082      break;      break;
1083      case SINH:      case SINH:
1084      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);          operation=SINHF;
1085      break;      break;
1086      case COSH:      case COSH:
1087      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);          operation=COSHF;
1088      break;      break;
1089      case TANH:      case TANH:
1090      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);          operation=TANHF;
1091      break;      break;
1092      case ERF:      case ERF:
1093  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ERFF;
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
1094      break;      break;
 #endif  
1095     case ASINH:     case ASINH:
1096  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ASINHF;
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
1097      break;      break;
1098     case ACOSH:     case ACOSH:
1099  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ACOSHF;
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
1100      break;      break;
1101     case ATANH:     case ATANH:
1102  #if defined (_WIN32) && !defined(__INTEL_COMPILER)          operation=ATANHF;
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
1103      break;      break;
1104      case LOG10:      case LOG10:
1105      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);          operation=LOG10F;
1106      break;      break;
1107      case LOG:      case LOG:
1108      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);          operation=LOGF;
1109      break;      break;
1110      case SIGN:      case SIGN:
1111      tensor_unary_operation(m_samplesize, left, result, escript::fsign);          operation=SIGNF;
1112      break;      break;
1113      case ABS:      case ABS:
1114      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);          operation=ABSF;
1115      break;      break;
1116      case NEG:      case NEG:
1117      tensor_unary_operation(m_samplesize, left, result, negate<double>());          operation=NEGF;
1118      break;      break;
1119      case POS:      case POS:
1120      // it doesn't mean anything for delayed.          // it doesn't mean anything for delayed.
1121      // it will just trigger a deep copy of the lazy object          // it will just trigger a deep copy of the lazy object
1122      throw DataException("Programmer error - POS not supported for lazy data.");          throw DataException("Programmer error - POS not supported for lazy data.");
1123      break;          break;
1124      case EXP:      case EXP:
1125      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);          operation=EXPF;
1126      break;      break;
1127      case SQRT:      case SQRT:
1128      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);          operation=SQRTF;
1129      break;      break;
1130      case RECIP:      case RECIP:
1131      tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));          operation=INVF;
1132      break;      break;
1133      case GZ:      case GZ:
1134      tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));          operation=GTZEROF;
1135      break;      break;
1136      case LZ:      case LZ:
1137      tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));          operation=LTZEROF;
1138      break;      break;
1139      case GEZ:      case GEZ:
1140      tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));          operation=GEZEROF;
1141      break;      break;
1142      case LEZ:      case LEZ:
1143      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));          operation=LEZEROF;
1144      break;      break;
1145  // NEZ and EZ are actually G_UNARY_P ops, but I don't see a compelling reason to treat them differently  // NEZ and EZ are actually G_UNARY_P ops, but I don't see a compelling reason to treat them differently
1146      case NEZ:      case NEZ:
1147      tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));          operation=NEQZEROF;
1148      break;      break;
1149      case EZ:      case EZ:
1150      tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));          operation=EQZEROF;
1151      break;      break;
   
1152      default:      default:
1153      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
1154    }    }
1155    return &v;    tensor_unary_array_operation(m_samplesize,
1156                                 left,
1157                                 result,
1158                                 operation,
1159                                 m_tol);  
1160      return &(m_samples);
1161  }  }
1162    
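resolveNodeUnary maps the node's operator to an ESFunction code and applies it elementwise across the whole sample. A standalone sketch of that enum-driven elementwise apply is below; the UnaryOp enum and apply_unary helper are illustrative (the real helper selects the function once rather than switching inside the loop).

    #include <cmath>
    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    enum class UnaryOp { Sin, Cos, Abs, Exp };

    // Apply one unary operation to every value in a sample.
    void apply_unary(std::size_t n, const double* in, double* out, UnaryOp op) {
        for (std::size_t i = 0; i < n; ++i) {
            switch (op) {
                case UnaryOp::Sin: out[i] = std::sin(in[i]); break;
                case UnaryOp::Cos: out[i] = std::cos(in[i]); break;
                case UnaryOp::Abs: out[i] = std::fabs(in[i]); break;
                case UnaryOp::Exp: out[i] = std::exp(in[i]); break;
                default: throw std::logic_error("unhandled unary op");
            }
        }
    }

    int main() {
        std::vector<double> in{0.0, 1.0, -2.0}, out(3);
        apply_unary(in.size(), in.data(), out.data(), UnaryOp::Abs);  // {0, 1, 2}
    }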
1163    
1164    const DataTypes::RealVectorType*
1165    DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset) const
1166    {
1167            // we assume that any collapsing has been done before we get here
1168            // since we only have one argument we don't need to think about only
1169            // processing single points.
1170            // we will also know we won't get identity nodes
1171      if (m_readytype!='E')
1172      {
1173      throw DataException("Programmer error - resolveNodeReduction should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeReduction should only be called on expanded Data.");
1174      }
1175      if (m_op==IDENTITY)
1176      {
1177      throw DataException("Programmer error - resolveNodeReduction should not be called on identity nodes.");
1178      }
1179      size_t loffset=0;
1180      const DataTypes::RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, loffset);
1181    
1182      roffset=m_samplesize*tid;
1183      unsigned int ndpps=getNumDPPSample();
1184      unsigned int psize=DataTypes::noValues(m_left->getShape());
1185      double* result=&(m_samples[roffset]);
1186      switch (m_op)
1187      {
1188        case MINVAL:
1189            {
1190              for (unsigned int z=0;z<ndpps;++z)
1191              {
1192                FMin op;
1193                *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max());
1194                loffset+=psize;
1195                result++;
1196              }
1197            }
1198            break;
1199        case MAXVAL:
1200            {
1201              for (unsigned int z=0;z<ndpps;++z)
1202              {
1203              FMax op;
1204              *result=escript::reductionOpVector(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max()*-1);
1205              loffset+=psize;
1206              result++;
1207              }
1208            }
1209            break;
1210        default:
1211          throw DataException("Programmer error - resolveNodeReduction can not resolve operator "+opToString(m_op)+".");
1212      }
1213      return &(m_samples);
1214    }
1215    
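MINVAL and MAXVAL above reduce each data point (a block of psize values) to a single number, repeated for every data point in the sample, with the result pointer advancing by one per point. A minimal standalone sketch of that per-datapoint reduction (function name is illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <limits>
    #include <vector>

    // Collapse each block of `psize` values to its minimum: one result per data point.
    std::vector<double> minval_per_point(const std::vector<double>& sample,
                                         std::size_t psize) {
        std::vector<double> result;
        for (std::size_t off = 0; off + psize <= sample.size(); off += psize) {
            double m = std::numeric_limits<double>::max();
            for (std::size_t i = 0; i < psize; ++i)
                m = std::min(m, sample[off + i]);
            result.push_back(m);
        }
        return result;
    }

    int main() {
        std::vector<double> sample{3, 1, 2,  9, 7, 8};   // two data points of size 3
        auto mins = minval_per_point(sample, 3);          // {1, 7}
    }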
1216    const DataTypes::RealVectorType*
1217    DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) const
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
1218  {  {
1219      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1220      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1221      // processing single points.          // processing single points.
1222    if (m_readytype!='E')    if (m_readytype!='E')
1223    {    {
1224      throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");
1225    }    }
1226      // since we can't write the result over the input, we need a result offset further along    if (m_op==IDENTITY)
1227    size_t subroffset=roffset+m_samplesize;    {
1228    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");
1229    roffset=offset;    }
1230      size_t subroffset;
1231      const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1232      roffset=m_samplesize*tid;
1233      size_t loop=0;
1234      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1235      size_t step=getNoValues();
1236      size_t offset=roffset;
1237    switch (m_op)    switch (m_op)
1238    {    {
1239      case SYM:      case SYM:
1240      DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);          for (loop=0;loop<numsteps;++loop)
1241      break;          {
1242                escript::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1243                subroffset+=step;
1244                offset+=step;
1245            }
1246            break;
1247      case NSYM:      case NSYM:
1248      DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);          for (loop=0;loop<numsteps;++loop)
1249      break;          {
1250                escript::antisymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1251                subroffset+=step;
1252                offset+=step;
1253            }
1254            break;
1255      default:      default:
1256      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
1257    }    }
1258    return &v;    return &m_samples;
1259  }  }
1260    
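SYM and NSYM compute the symmetric and antisymmetric parts of each rank-2 data point, stepping through the sample one point at a time. The sketch below shows the per-point arithmetic for a single square, row-major block; it stands in for the escript::symmetric / escript::antisymmetric calls but is not their implementation.

    #include <cstddef>
    #include <vector>

    // Symmetric part: out = (A + A^T) / 2 for one n x n data point.
    void symmetric_part(const double* a, double* out, std::size_t n) {
        for (std::size_t i = 0; i < n; ++i)
            for (std::size_t j = 0; j < n; ++j)
                out[i * n + j] = 0.5 * (a[i * n + j] + a[j * n + i]);
    }

    // Antisymmetric part: out = (A - A^T) / 2 for one n x n data point.
    void antisymmetric_part(const double* a, double* out, std::size_t n) {
        for (std::size_t i = 0; i < n; ++i)
            for (std::size_t j = 0; j < n; ++j)
                out[i * n + j] = 0.5 * (a[i * n + j] - a[j * n + i]);
    }

    int main() {
        std::vector<double> a{1, 2, 3, 4}, s(4), t(4);    // one 2x2 data point
        symmetric_part(a.data(), s.data(), 2);             // {1, 2.5, 2.5, 4}
        antisymmetric_part(a.data(), t.data(), 2);         // {0, -0.5, 0.5, 0}
    }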
1261  /*  const DataTypes::RealVectorType*
1262    \brief Compute the value of the expression (unary operation) for the given sample.  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset) const
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
1263  {  {
1264      // we assume that any collapsing has been done before we get here          // we assume that any collapsing has been done before we get here
1265      // since we only have one argument we don't need to think about only          // since we only have one argument we don't need to think about only
1266      // processing single points.          // processing single points.
1267    if (m_readytype!='E')    if (m_readytype!='E')
1268    {    {
1269      throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");
1270    }    }
1271      // since we can't write the result over the input, we need a result offset further along    if (m_op==IDENTITY)
1272    size_t subroffset=roffset+m_samplesize;    {
1273    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);      throw DataException("Programmer error - resolveNodeNP1OUT_P should not be called on identity nodes.");
1274    roffset=offset;    }
1275      size_t subroffset;
1276      size_t offset;
1277      const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1278      roffset=m_samplesize*tid;
1279      offset=roffset;
1280      size_t loop=0;
1281      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1282      size_t outstep=getNoValues();
1283      size_t instep=m_left->getNoValues();
1284    switch (m_op)    switch (m_op)
1285    {    {
1286      case TRACE:      case TRACE:
1287           DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);          for (loop=0;loop<numsteps;++loop)
1288      break;          {
1289                escript::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);
1290                subroffset+=instep;
1291                offset+=outstep;
1292            }
1293            break;
1294      case TRANS:      case TRANS:
1295           DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);          for (loop=0;loop<numsteps;++loop)
1296      break;          {
1297                escript::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);
1298                subroffset+=instep;
1299                offset+=outstep;
1300            }
1301            break;
1302      default:      default:
1303      throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
1304    }    }
1305    return &v;    return &m_samples;
1306  }  }
1307    
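TRACE and TRANS walk the sample with different input and output strides because the operand and result have different shapes (instep = m_left->getNoValues(), outstep = getNoValues()). The following standalone sketch shows that stride bookkeeping for trace, where each n*n block shrinks to a single value; names are illustrative.

    #include <cstddef>
    #include <vector>

    // Trace of each rank-2 data point: consumes n*n values, produces 1.
    void trace_per_point(const double* in, double* out,
                         std::size_t n, std::size_t numpoints) {
        const std::size_t instep = n * n;   // values consumed per data point
        const std::size_t outstep = 1;      // values produced per data point
        for (std::size_t p = 0; p < numpoints; ++p) {
            double t = 0.0;
            for (std::size_t i = 0; i < n; ++i)
                t += in[p * instep + i * n + i];
            out[p * outstep] = t;
        }
    }

    int main() {
        std::vector<double> in{1, 2, 3, 4,  5, 6, 7, 8};   // two 2x2 points
        std::vector<double> out(2);
        trace_per_point(in.data(), out.data(), 2, 2);       // {5, 13}
    }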
1308    
1309  #define PROC_OP(TYPE,X)                               \  const DataTypes::RealVectorType*
1310      for (int i=0;i<steps;++i,resultp+=resultStep) \  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset) const
1311      { \  {
1312         tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \    if (m_readytype!='E')
1313         lroffset+=leftStep; \    {
1314         rroffset+=rightStep; \      throw DataException("Programmer error - resolveNodeNP1OUT_2P should only be called on expanded Data.");
1315      }    }
1316      if (m_op==IDENTITY)
1317      {
1318        throw DataException("Programmer error - resolveNodeNP1OUT_2P should not be called on identity nodes.");
1319      }
1320      size_t subroffset;
1321      size_t offset;
1322      const RealVectorType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1323      roffset=m_samplesize*tid;
1324      offset=roffset;
1325      size_t loop=0;
1326      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1327      size_t outstep=getNoValues();
1328      size_t instep=m_left->getNoValues();
1329      switch (m_op)
1330      {
1331        case SWAP:
1332            for (loop=0;loop<numsteps;++loop)
1333            {
1334                escript::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);
1335                subroffset+=instep;
1336                offset+=outstep;
1337            }
1338            break;
1339        default:
1340            throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");
1341      }
1342      return &m_samples;
1343    }
1344    
1345    const DataTypes::RealVectorType*
1346    DataLazy::resolveNodeCondEval(int tid, int sampleNo, size_t& roffset) const
1347    {
1348      if (m_readytype!='E')
1349      {
1350        throw DataException("Programmer error - resolveNodeCondEval should only be called on expanded Data.");
1351      }
1352      if (m_op!=CONDEVAL)
1353      {
1354        throw DataException("Programmer error - resolveNodeCondEval should only be called on CONDEVAL nodes.");
1355      }
1356      size_t subroffset;
1357    
1358      const RealVectorType* maskres=m_mask->resolveNodeSample(tid, sampleNo, subroffset);
1359      const RealVectorType* srcres=0;
1360      if ((*maskres)[subroffset]>0)
1361      {
1362            srcres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1363      }
1364      else
1365      {
1366            srcres=m_right->resolveNodeSample(tid, sampleNo, subroffset);
1367      }
1368    
1369      // Now we need to copy the result
1370    
1371      roffset=m_samplesize*tid;
1372      for (int i=0;i<m_samplesize;++i)
1373      {
1374            m_samples[roffset+i]=(*srcres)[subroffset+i];  
1375      }
1376    
1377      return &m_samples;
1378    }
1379    
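CONDEVAL resolves the mask for the sample, then copies the whole sample from either the left or the right child depending on the mask value. A minimal sketch of that selection-and-copy step (function and parameter names are illustrative):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Copy the sample from whichever source the mask selects.
    void cond_copy(double maskValue,
                   const double* whenTrue, const double* whenFalse,
                   double* result, std::size_t samplesize) {
        const double* src = (maskValue > 0) ? whenTrue : whenFalse;
        std::memcpy(result, src, samplesize * sizeof(double));
    }

    int main() {
        std::vector<double> a{1, 1, 1}, b{2, 2, 2}, r(3);
        cond_copy(0.0, a.data(), b.data(), r.data(), 3);    // copies b into r
    }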
 /*  
   \brief Compute the value of the expression (binary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
1380  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1381  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1382  // If both children are expanded, then we can process them in a single operation (we treat  // If both children are expanded, then we can process them in a single operation (we treat
# Line 998  DataLazy::resolveNP1OUT_P(ValueType& v, Line 1386  DataLazy::resolveNP1OUT_P(ValueType& v,
1386  // There is an additional complication when scalar operations are considered.  // There is an additional complication when scalar operations are considered.
1387  // For example, 2+Vector.  // For example, 2+Vector.
1388  // In this case each double within the point is treated individually  // In this case each double within the point is treated individually
1389  DataTypes::ValueType*  const DataTypes::RealVectorType*
1390  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset) const
1391  {  {
1392  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1393    
1394    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1395      // first work out which of the children are expanded          // first work out which of the children are expanded
1396    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1397    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1398    if (!leftExp && !rightExp)    if (!leftExp && !rightExp)
1399    {    {
1400      throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");          throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
1401    }    }
1402    bool leftScalar=(m_left->getRank()==0);    bool leftScalar=(m_left->getRank()==0);
1403    bool rightScalar=(m_right->getRank()==0);    bool rightScalar=(m_right->getRank()==0);
1404    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1405    int steps=(bigloops?1:getNumDPPSample());    {
1406    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint          throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1407    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops    }
1408    {    size_t leftsize=m_left->getNoValues();
1409      if (!leftScalar && !rightScalar)    size_t rightsize=m_right->getNoValues();
1410      {    size_t chunksize=1;                   // how many doubles will be processed in one go
1411         throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");    int leftstep=0;               // how far should the left offset advance after each step
1412      }    int rightstep=0;
1413      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());    int numsteps=0;               // total number of steps for the inner loop
1414      chunksize=1;    // for scalar    int oleftstep=0;      // the o variables refer to the outer loop
1415    }        int orightstep=0;     // The outer loop is only required in cases where there is an extended scalar
1416    int leftStep=((leftExp && (!rightExp || rightScalar))? m_right->getNoValues() : 0);    int onumsteps=1;
1417    int rightStep=((rightExp && (!leftExp || leftScalar))? m_left->getNoValues() : 0);    
1418    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    bool LES=(leftExp && leftScalar);     // Left is an expanded scalar
1419      // Get the values of sub-expressions    bool RES=(rightExp && rightScalar);
1420    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    bool LS=(!leftExp && leftScalar);     // left is a single scalar
1421    const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note    bool RS=(!rightExp && rightScalar);
1422      // the right child starts further along.    bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1423    double* resultp=&(v[offset]);     // results are stored at the vector offset we recieved    bool RN=(!rightExp && !rightScalar);
1424      bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1425      bool REN=(rightExp && !rightScalar);
1426    
1427      if ((LES && RES) || (LEN && REN))     // both are Expanded scalars or both are expanded non-scalars
1428      {
1429            chunksize=m_left->getNumDPPSample()*leftsize;
1430            leftstep=0;
1431            rightstep=0;
1432            numsteps=1;
1433      }
1434      else if (LES || RES)
1435      {
1436            chunksize=1;
1437            if (LES)                // left is an expanded scalar
1438            {
1439                    if (RS)
1440                    {
1441                       leftstep=1;
1442                       rightstep=0;
1443                       numsteps=m_left->getNumDPPSample();
1444                    }
1445                    else            // RN or REN
1446                    {
1447                       leftstep=0;
1448                       oleftstep=1;
1449                       rightstep=1;
1450                       orightstep=(RN ? -(int)rightsize : 0);
1451                       numsteps=rightsize;
1452                       onumsteps=m_left->getNumDPPSample();
1453                    }
1454            }
1455            else            // right is an expanded scalar
1456            {
1457                    if (LS)
1458                    {
1459                       rightstep=1;
1460                       leftstep=0;
1461                       numsteps=m_right->getNumDPPSample();
1462                    }
1463                    else
1464                    {
1465                       rightstep=0;
1466                       orightstep=1;
1467                       leftstep=1;
1468                       oleftstep=(LN ? -(int)leftsize : 0);
1469                       numsteps=leftsize;
1470                       onumsteps=m_right->getNumDPPSample();
1471                    }
1472            }
1473      }
1474      else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1475      {
1476            if (LEN)        // and Right will be a single value
1477            {
1478                    chunksize=rightsize;
1479                    leftstep=rightsize;
1480                    rightstep=0;
1481                    numsteps=m_left->getNumDPPSample();
1482                    if (RS)
1483                    {
1484                       numsteps*=leftsize;
1485                    }
1486            }
1487            else    // REN
1488            {
1489                    chunksize=leftsize;
1490                    rightstep=leftsize;
1491                    leftstep=0;
1492                    numsteps=m_right->getNumDPPSample();
1493                    if (LS)
1494                    {
1495                       numsteps*=rightsize;
1496                    }
1497            }
1498      }
1499    
1500      int resultStep=max(leftstep,rightstep);       // only one (at most) should be !=0
1501            // Get the values of sub-expressions
1502      const RealVectorType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);      
1503      const RealVectorType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);
1504    LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1505    LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1506    LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
1507    LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)
1508    LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)
1509    LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)
1510    LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)
1511    
1512    LAZYDEBUG(cout << "Left res["<< lroffset<< "]=" << (*left)[lroffset] << endl;)
1513    LAZYDEBUG(cout << "Right res["<< rroffset<< "]=" << (*right)[rroffset] << endl;)
1514    
1515    
1516      roffset=m_samplesize*tid;
1517      double* resultp=&(m_samples[roffset]);                // results are stored at the vector offset we received
1518    switch(m_op)    switch(m_op)
1519    {    {
1520      case ADD:      case ADD:
1521          PROC_OP(NO_ARG,plus<double>());          //PROC_OP(NO_ARG,plus<double>());
1522      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1523                 &(*left)[0],
1524                 &(*right)[0],
1525                 chunksize,
1526                 onumsteps,
1527                 numsteps,
1528                 resultStep,
1529                 leftstep,
1530                 rightstep,
1531                 oleftstep,
1532                 orightstep,
1533                 lroffset,
1534                 rroffset,
1535                 escript::ESFunction::PLUSF);  
1536            break;
1537      case SUB:      case SUB:
1538      PROC_OP(NO_ARG,minus<double>());        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1539      break;               &(*left)[0],
1540                 &(*right)[0],
1541                 chunksize,
1542                 onumsteps,
1543                 numsteps,
1544                 resultStep,
1545                 leftstep,
1546                 rightstep,
1547                 oleftstep,
1548                 orightstep,
1549                 lroffset,
1550                 rroffset,
1551                 escript::ESFunction::MINUSF);        
1552            //PROC_OP(NO_ARG,minus<double>());
1553            break;
1554      case MUL:      case MUL:
1555      PROC_OP(NO_ARG,multiplies<double>());          //PROC_OP(NO_ARG,multiplies<double>());
1556      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1557                 &(*left)[0],
1558                 &(*right)[0],
1559                 chunksize,
1560                 onumsteps,
1561                 numsteps,
1562                 resultStep,
1563                 leftstep,
1564                 rightstep,
1565                 oleftstep,
1566                 orightstep,
1567                 lroffset,
1568                 rroffset,
1569                 escript::ESFunction::MULTIPLIESF);      
1570            break;
1571      case DIV:      case DIV:
1572      PROC_OP(NO_ARG,divides<double>());          //PROC_OP(NO_ARG,divides<double>());
1573      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1574                 &(*left)[0],
1575                 &(*right)[0],
1576                 chunksize,
1577                 onumsteps,
1578                 numsteps,
1579                 resultStep,
1580                 leftstep,
1581                 rightstep,
1582                 oleftstep,
1583                 orightstep,
1584                 lroffset,
1585                 rroffset,
1586                 escript::ESFunction::DIVIDESF);          
1587            break;
1588      case POW:      case POW:
1589         PROC_OP(double (double,double),::pow);         //PROC_OP(double (double,double),::pow);
1590      break;        escript::binaryOpVectorLazyHelper<real_t, real_t, real_t>(resultp,
1591                 &(*left)[0],
1592                 &(*right)[0],
1593                 chunksize,
1594                 onumsteps,
1595                 numsteps,
1596                 resultStep,
1597                 leftstep,
1598                 rightstep,
1599                 oleftstep,
1600                 orightstep,
1601                 lroffset,
1602                 rroffset,
1603                 escript::ESFunction::POWF);          
1604            break;
1605      default:      default:
1606      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
1607    }    }
1608    roffset=offset;    LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)
1609    return &v;    return &m_samples;
1610  }  }
1611    
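The binary resolver above classifies each child as expanded/non-expanded and scalar/non-scalar, then derives a chunk size plus inner and outer step sizes so that, for example, a scalar operand is repeated across every value of a non-scalar data point (the "2+Vector" case mentioned earlier). The sketch below shows only the simplest mixed case, a per-point scalar broadcast over non-scalar data points; it omits the outer loop that the real code needs for expanded scalars, and all names are illustrative.

    #include <cstddef>
    #include <vector>

    // "scalar op non-scalar": the scalar advances by 1 per data point while the
    // non-scalar operand (and the result) advance by the data-point size.
    void add_scalar_to_points(const double* scalarPerPoint,   // one value per data point
                              const double* points,           // psize values per data point
                              double* result,
                              std::size_t psize, std::size_t numpoints) {
        std::size_t loff = 0, roff = 0;
        const std::size_t leftstep = 1, rightstep = psize;
        for (std::size_t p = 0; p < numpoints; ++p) {
            for (std::size_t i = 0; i < psize; ++i)
                result[roff + i] = scalarPerPoint[loff] + points[roff + i];
            loff += leftstep;
            roff += rightstep;
        }
    }

    int main() {
        std::vector<double> s{10, 20}, v{1, 2, 3,  4, 5, 6}, r(6);
        add_scalar_to_points(s.data(), v.data(), r.data(), 3, 2);  // {11,12,13, 24,25,26}
    }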
1612    
 /*  
   \brief Compute the value of the expression (tensor product) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
1613  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1614  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1615  // unlike the other resolve helpers, we must treat these datapoints separately.  // unlike the other resolve helpers, we must treat these datapoints separately.
1616  DataTypes::ValueType*  const DataTypes::RealVectorType*
1617  DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset) const
1618  {  {
1619  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1620    
1621    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;        // offsets in the left and right result vectors
1622      // first work out which of the children are expanded          // first work out which of the children are expanded
1623    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1624    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1625    int steps=getNumDPPSample();    int steps=getNumDPPSample();
1626    int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);    int leftStep=(leftExp? m_left->getNoValues() : 0);            // do not have scalars as input to this method
1627    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);    int rightStep=(rightExp?m_right->getNoValues() : 0);
1628    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0  
1629      // Get the values of sub-expressions (leave a gap of one sample for the result).    int resultStep=getNoValues();
1630    const ValueType* left=m_left->resolveSample(v,offset+m_samplesize,sampleNo,lroffset);    roffset=m_samplesize*tid;
1631    const ValueType* right=m_right->resolveSample(v,offset+2*m_samplesize,sampleNo,rroffset);    size_t offset=roffset;
1632    double* resultp=&(v[offset]);     // results are stored at the vector offset we recieved  
1633      const RealVectorType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);
1634    
1635      const RealVectorType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);
1636    
1637    LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;
1638    cout << getNoValues() << endl;)
1639    
1640    
1641    LAZYDEBUG(cerr << "Post sub calls: " << toString() << endl;)
1642    LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)
1643    LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)
1644    LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)
1645    LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)
1646    LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1647    LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)
1648    
1649      double* resultp=&(m_samples[offset]);         // results are stored at the vector offset we received
1650    switch(m_op)    switch(m_op)
1651    {    {
1652      case PROD:      case PROD:
1653      for (int i=0;i<steps;++i,resultp+=resultStep)          for (int i=0;i<steps;++i,resultp+=resultStep)
1654      {          {
1655            const double *ptr_0 = &((*left)[lroffset]);            const double *ptr_0 = &((*left)[lroffset]);
1656            const double *ptr_1 = &((*right)[rroffset]);            const double *ptr_1 = &((*right)[rroffset]);
1657            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);  
1658        lroffset+=leftStep;  LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
1659        rroffset+=rightStep;  LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)
1660      }  
1661      break;            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1662    
1663              lroffset+=leftStep;
1664              rroffset+=rightStep;
1665            }
1666            break;
1667      default:      default:
1668      throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");          throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1669    }    }
1670    roffset=offset;    roffset=offset;
1671    return &v;    return &m_samples;
1672  }  }
1673    
1674    
1675    const DataTypes::RealVectorType*
1676    DataLazy::resolveSample(int sampleNo, size_t& roffset) const
1677    {
1678    #ifdef _OPENMP
1679            int tid=omp_get_thread_num();
1680    #else
1681            int tid=0;
1682    #endif
1683    
1684  /*  #ifdef LAZY_STACK_PROF
1685    \brief Compute the value of the expression for the given sample.          stackstart[tid]=&tid;
1686    \return Vector which stores the value of the subexpression for the given sample.          stackend[tid]=&tid;
1687    \param v A vector to store intermediate results.          const DataTypes::RealVectorType* r=resolveNodeSample(tid, sampleNo, roffset);
1688    \param offset Index in v to begin storing results.          size_t d=(size_t)stackstart[tid]-(size_t)stackend[tid];
1689    \param sampleNo Sample number to evaluate.          #pragma omp critical
1690    \param roffset (output parameter) the offset in the return vector where the result begins.          if (d>maxstackuse)
1691            {
1692    cout << "Max resolve Stack use " << d << endl;
1693                    maxstackuse=d;
1694            }
1695            return r;
1696    #else
1697            return resolveNodeSample(tid, sampleNo, roffset);
1698    #endif
1699    }
1700    
1701    The return value will be an existing vector so do not deallocate it.  
1702  */  // This needs to do the work of the identity constructor
1703  // the vector and the offset are a place where the method could write its data if it wishes  void
1704  // it is not obligated to do so. For example, if it has its own storage already, it can use that.  DataLazy::resolveToIdentity()
 // Hence the return value to indicate where the data is actually stored.  
 // Regardless, the storage should be assumed to be used, even if it isn't.  
   
 // the roffset is the offset within the returned vector where the data begins  
 const DataTypes::ValueType*  
 DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  
1705  {  {
1706  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)     if (m_op==IDENTITY)
1707      // collapse so we have a 'E' node or an IDENTITY for some other type          return;
1708    if (m_readytype!='E' && m_op!=IDENTITY)     DataReady_ptr p=resolveNodeWorker();
1709    {     makeIdentity(p);
1710      collapse();  }
1711    }  
1712    if (m_op==IDENTITY)    void DataLazy::makeIdentity(const DataReady_ptr& p)
1713    {
1714       m_op=IDENTITY;
1715       m_axis_offset=0;
1716       m_transpose=0;
1717       m_SL=m_SM=m_SR=0;
1718       m_children=m_height=0;
1719       m_id=p;
1720       if(p->isConstant()) {m_readytype='C';}
1721       else if(p->isExpanded()) {m_readytype='E';}
1722       else if (p->isTagged()) {m_readytype='T';}
1723       else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}
1724       m_samplesize=p->getNumDPPSample()*p->getNoValues();
1725       m_left.reset();
1726       m_right.reset();
1727    }
1728    
1729    
1730    DataReady_ptr
1731    DataLazy::resolve()
1732    {
1733        resolveToIdentity();
1734        return m_id;
1735    }
1736    
1737    
1738    /* This is really a static method but I think that caused problems in windows */
1739    void
1740    DataLazy::resolveGroupWorker(std::vector<DataLazy*>& dats)
1741    {
1742      if (dats.empty())
1743    {    {
1744      const ValueType& vec=m_id->getVector();          return;
     if (m_readytype=='C')  
     {  
     roffset=0;  
     return &(vec);  
     }  
     roffset=m_id->getPointOffset(sampleNo, 0);  
     return &(vec);  
1745    }    }
1746    if (m_readytype!='E')    vector<DataLazy*> work;
1747    {    FunctionSpace fs=dats[0]->getFunctionSpace();
1748      throw DataException("Programmer Error - Collapse did not produce an expanded node.");    bool match=true;
1749      for (int i=dats.size()-1;i>=0;--i)
1750      {
1751            if (dats[i]->m_readytype!='E')
1752            {
1753                    dats[i]->collapse();
1754            }
1755            if (dats[i]->m_op!=IDENTITY)
1756            {
1757                    work.push_back(dats[i]);
1758                    if (fs!=dats[i]->getFunctionSpace())
1759                    {
1760                            match=false;
1761                    }
1762            }
1763      }
1764      if (work.empty())
1765      {
1766            return;         // no work to do
1767      }
1768      if (match)    // all functionspaces match.  Yes I realise this is overly strict
1769      {             // it is possible that dats[0] is one of the objects which we discarded and
1770                    // all the other functionspaces match.
1771            vector<DataExpanded*> dep;
1772            vector<RealVectorType*> vecs;
1773            for (int i=0;i<work.size();++i)
1774            {
1775                    dep.push_back(new DataExpanded(fs,work[i]->getShape(), RealVectorType(work[i]->getNoValues())));
1776                    vecs.push_back(&(dep[i]->getVectorRW()));
1777            }
1778            int totalsamples=work[0]->getNumSamples();
1779            const RealVectorType* res=0; // Storage for answer
1780            int sample;
1781            #pragma omp parallel private(sample, res)
1782            {
1783                size_t roffset=0;
1784                #pragma omp for schedule(static)
1785                for (sample=0;sample<totalsamples;++sample)
1786                {
1787                    roffset=0;
1788                    int j;
1789                    for (j=work.size()-1;j>=0;--j)
1790                    {
1791    #ifdef _OPENMP
1792                        res=work[j]->resolveNodeSample(omp_get_thread_num(),sample,roffset);
1793    #else
1794                        res=work[j]->resolveNodeSample(0,sample,roffset);
1795    #endif
1796                        RealVectorType::size_type outoffset=dep[j]->getPointOffset(sample,0);
1797                        memcpy(&((*vecs[j])[outoffset]),&((*res)[roffset]),work[j]->m_samplesize*sizeof(RealVectorType::ElementType));
1798                    }
1799                }
1800            }
1801            // Now we need to load the new results as identity ops into the lazy nodes
1802            for (int i=work.size()-1;i>=0;--i)
1803            {
1804                work[i]->makeIdentity(REFCOUNTNS::dynamic_pointer_cast<DataReady>(dep[i]->getPtr()));
1805            }
1806    }    }
1807    switch (getOpgroup(m_op))    else  // functionspaces do not match
1808    {    {
1809    case G_UNARY:          for (int i=0;i<work.size();++i)
1810    case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);          {
1811    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);                  work[i]->resolveToIdentity();
1812    case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);          }
   case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);  
   case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);  
   default:  
     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");  
1813    }    }
1814  }  }
1815    
1816    
1817  // To simplify the memory management, all threads operate on one large vector, rather than one each.  
1818  // Each sample is evaluated independently and copied into the result DataExpanded.  // This version of resolve uses storage in each node to hold results
1819  DataReady_ptr  DataReady_ptr
1820  DataLazy::resolve()  DataLazy::resolveNodeWorker()
1821  {  {
1822      if (m_readytype!='E')         // if the whole sub-expression is Constant or Tagged, then evaluate it normally
 LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)  
   
   if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally  
1823    {    {
1824      collapse();      collapse();
1825    }    }
1826    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.    if (m_op==IDENTITY)           // So a lazy expression of Constant or Tagged data will be returned here.
1827    {    {
1828      return m_id;      return m_id;
1829    }    }
1830      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'          // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1831    size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  RealVectorType(getNoValues()));
1832      // storage to evaluate its expression    RealVectorType& resvec=result->getVectorRW();
   int numthreads=1;  
 #ifdef _OPENMP  
   numthreads=getNumberOfThreads();  
 #endif  
   ValueType v(numthreads*threadbuffersize);  
 LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)  
   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));  
   ValueType& resvec=result->getVector();  
1833    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
1834    
1835    int sample;    int sample;
   size_t outoffset;     // offset in the output data  
1836    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1837    const ValueType* res=0;   // Vector storing the answer    const RealVectorType* res=0;       // Storage for answer
1838    size_t resoffset=0;       // where in the vector to find the answer  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1839    #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)    #pragma omp parallel private(sample,res)
1840    for (sample=0;sample<totalsamples;++sample)    {
1841    {          size_t roffset=0;
1842  LAZYDEBUG(cout << "################################# " << sample << endl;)  #ifdef LAZY_STACK_PROF
1843            stackstart[omp_get_thread_num()]=&roffset;
1844            stackend[omp_get_thread_num()]=&roffset;
1845    #endif
1846            #pragma omp for schedule(static)
1847            for (sample=0;sample<totalsamples;++sample)
1848            {
1849                    roffset=0;
1850  #ifdef _OPENMP  #ifdef _OPENMP
1851      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);                  res=resolveNodeSample(omp_get_thread_num(),sample,roffset);
1852  #else  #else
1853      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.                  res=resolveNodeSample(0,sample,roffset);
1854  #endif  #endif
1855  LAZYDEBUG(cerr << "-------------------------------- " << endl;)  LAZYDEBUG(cout << "Sample #" << sample << endl;)
1856      outoffset=result->getPointOffset(sample,0);  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )
1857  LAZYDEBUG(cerr << "offset=" << outoffset << endl;)                  RealVectorType::size_type outoffset=result->getPointOffset(sample,0);
1858      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector                  memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(RealVectorType::ElementType));
1859      {          }
1860      resvec[outoffset]=(*res)[resoffset];    }
1861      }  #ifdef LAZY_STACK_PROF
1862  LAZYDEBUG(cerr << "*********************************" << endl;)    for (int i=0;i<getNumberOfThreads();++i)
1863      {
1864            size_t r=((size_t)stackstart[i] - (size_t)stackend[i]);
1865    //      cout << i << " " << stackstart[i] << " .. " << stackend[i] << " = " <<  r << endl;
1866            if (r>maxstackuse)
1867            {
1868                    maxstackuse=r;
1869            }
1870    }    }
1871      cout << "Max resolve Stack use=" << maxstackuse << endl;
1872    #endif
1873    return resptr;    return resptr;
1874  }  }
1875    
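resolveNodeWorker (and resolveGroupWorker above it) follow the same parallel pattern: each thread resolves whole samples into its own scratch block and memcpy's each finished sample into the shared result vector at that sample's offset, so no locking is needed because the output slices are disjoint. A self-contained sketch of that pattern with a placeholder workload (compiles with or without OpenMP; all names are illustrative):

    #include <cstddef>
    #include <cstring>
    #include <vector>
    #ifdef _OPENMP
    #include <omp.h>
    #endif

    int main() {
        const int totalsamples = 100;
        const std::size_t samplesize = 16;
    #ifdef _OPENMP
        const int nthreads = omp_get_max_threads();
    #else
        const int nthreads = 1;
    #endif
        std::vector<double> scratch(nthreads * samplesize);     // per-thread buffers
        std::vector<double> result(totalsamples * samplesize);  // shared output

        #pragma omp parallel for schedule(static)
        for (int sample = 0; sample < totalsamples; ++sample) {
    #ifdef _OPENMP
            const int tid = omp_get_thread_num();
    #else
            const int tid = 0;
    #endif
            double* buf = &scratch[tid * samplesize];
            for (std::size_t i = 0; i < samplesize; ++i)         // placeholder "resolution"
                buf[i] = sample + 0.01 * i;
            std::memcpy(&result[sample * samplesize], buf,       // disjoint per sample
                        samplesize * sizeof(double));
        }
        return 0;
    }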
# Line 1224  std::string Line 1877  std::string
1877  DataLazy::toString() const  DataLazy::toString() const
1878  {  {
1879    ostringstream oss;    ostringstream oss;
1880    oss << "Lazy Data:";    oss << "Lazy Data: [depth=" << m_height<< "] ";
1881    intoString(oss);    switch (escriptParams.getLAZY_STR_FMT())
1882      {
1883      case 1:       // tree format
1884            oss << endl;
1885            intoTreeString(oss,"");
1886            break;
1887      case 2:       // just the depth
1888            break;
1889      default:
1890            intoString(oss);
1891            break;
1892      }
1893    return oss.str();    return oss.str();
1894  }  }
1895    
# Line 1233  DataLazy::toString() const Line 1897  DataLazy::toString() const
1897  void  void
1898  DataLazy::intoString(ostringstream& oss) const  DataLazy::intoString(ostringstream& oss) const
1899  {  {
1900    //    oss << "[" << m_children <<";"<<m_height <<"]";
1901    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1902    {    {
1903    case G_IDENTITY:    case G_IDENTITY:
1904      if (m_id->isExpanded())          if (m_id->isExpanded())
1905      {          {
1906         oss << "E";             oss << "E";
1907      }          }
1908      else if (m_id->isTagged())          else if (m_id->isTagged())
1909      {          {
1910        oss << "T";            oss << "T";
1911      }          }
1912      else if (m_id->isConstant())          else if (m_id->isConstant())
1913      {          {
1914        oss << "C";            oss << "C";
1915      }          }
1916      else          else
1917      {          {
1918        oss << "?";            oss << "?";
1919      }          }
1920      oss << '@' << m_id.get();          oss << '@' << m_id.get();
1921      break;          break;
1922    case G_BINARY:    case G_BINARY:
1923      oss << '(';          oss << '(';
1924      m_left->intoString(oss);          m_left->intoString(oss);
1925      oss << ' ' << opToString(m_op) << ' ';          oss << ' ' << opToString(m_op) << ' ';
1926      m_right->intoString(oss);          m_right->intoString(oss);
1927      oss << ')';          oss << ')';
1928      break;          break;
1929    case G_UNARY:    case G_UNARY:
1930    case G_UNARY_P:    case G_UNARY_P:
1931    case G_NP1OUT:    case G_NP1OUT:
1932    case G_NP1OUT_P:    case G_NP1OUT_P:
1933      oss << opToString(m_op) << '(';    case G_REDUCTION:
1934      m_left->intoString(oss);          oss << opToString(m_op) << '(';
1935      oss << ')';          m_left->intoString(oss);
1936      break;          oss << ')';
1937            break;
1938    case G_TENSORPROD:    case G_TENSORPROD:
1939      oss << opToString(m_op) << '(';          oss << opToString(m_op) << '(';
1940      m_left->intoString(oss);          m_left->intoString(oss);
1941      oss << ", ";          oss << ", ";
1942      m_right->intoString(oss);          m_right->intoString(oss);
1943      oss << ')';          oss << ')';
1944      break;          break;
1945      case G_NP1OUT_2P:
1946            oss << opToString(m_op) << '(';
1947            m_left->intoString(oss);
1948            oss << ", " << m_axis_offset << ", " << m_transpose;
1949            oss << ')';
1950            break;
1951      case G_CONDEVAL:
1952            oss << opToString(m_op)<< '(' ;
1953            m_mask->intoString(oss);
1954            oss << " ? ";
1955            m_left->intoString(oss);
1956            oss << " : ";
1957            m_right->intoString(oss);
1958            oss << ')';
1959            break;
1960    default:    default:
1961      oss << "UNKNOWN";          oss << "UNKNOWN";
1962    }    }
1963  }  }
1964    
1965    
1966    void
1967    DataLazy::intoTreeString(ostringstream& oss, string indent) const
1968    {
1969      oss << '[' << m_rank << ':' << setw(3) << m_samplesize << "] " << indent;
1970      switch (getOpgroup(m_op))
1971      {
1972      case G_IDENTITY:
1973            if (m_id->isExpanded())
1974            {
1975               oss << "E";
1976            }
1977            else if (m_id->isTagged())
1978            {
1979              oss << "T";
1980            }
1981            else if (m_id->isConstant())
1982            {
1983              oss << "C";
1984            }
1985            else
1986            {
1987              oss << "?";
1988            }
1989            oss << '@' << m_id.get() << endl;
1990            break;
1991      case G_BINARY:
1992            oss << opToString(m_op) << endl;
1993            indent+='.';
1994            m_left->intoTreeString(oss, indent);
1995            m_right->intoTreeString(oss, indent);
1996            break;
1997      case G_UNARY:
1998      case G_UNARY_P:
1999      case G_NP1OUT:
2000      case G_NP1OUT_P:
2001      case G_REDUCTION:
2002            oss << opToString(m_op) << endl;
2003            indent+='.';
2004            m_left->intoTreeString(oss, indent);
2005            break;
2006      case G_TENSORPROD:
2007            oss << opToString(m_op) << endl;
2008            indent+='.';
2009            m_left->intoTreeString(oss, indent);
2010            m_right->intoTreeString(oss, indent);
2011            break;
2012      case G_NP1OUT_2P:
2013            oss << opToString(m_op) << ", " << m_axis_offset << ", " << m_transpose<< endl;
2014            indent+='.';
2015            m_left->intoTreeString(oss, indent);
2016            break;
2017      default:
2018            oss << "UNKNOWN";
2019      }
2020    }
2021    
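// Rendering note (illustrative, not part of either revision): intoTreeString() prints one node
// per line, each prefixed by "[rank:samplesize] " and an indent string that grows by one '.'
// per level, with children listed after their parent. A small binary expression over an
// expanded and a constant leaf would dump as something like:
//
//   [0:  1] <op>
//   [0:  1] .E@0x...
//   [0:  1] .C@0x...
//
// where <op> is the opToString() name and the bracketed values and addresses are illustrative.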
2022    
2023  DataAbstract*  DataAbstract*
2024  DataLazy::deepCopy()  DataLazy::deepCopy() const
2025  {  {
2026    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
2027    {    {
2028    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());
2029    case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);    case G_UNARY:
2030    case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);    case G_REDUCTION:      return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
2031      case G_UNARY_P:       return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_tol);
2032      case G_BINARY:        return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
2033    case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);    case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
2034    case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);    case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
2035      case G_NP1OUT_P:   return new DataLazy(m_left->deepCopy()->getPtr(),m_op,  m_axis_offset);
2036      case G_NP1OUT_2P:  return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
2037    default:    default:
2038      throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");          throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
2039    }    }
2040  }  }
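// A minimal usage sketch (assumes the escript headers and the usual DataAbstract_ptr
// smart-pointer typedef; 'someLazy' is an illustrative name, not one from this file):
//
//   // deepCopy() rebuilds the node from deep copies of its children, so the returned
//   // tree shares no DataReady leaves with the original expression.
//   DataAbstract_ptr copy = someLazy->deepCopy()->getPtr();   // same idiom as the branches above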
2041    
2042    
2043    
2044  // There is no single, natural interpretation of getLength on DataLazy.  // There is no single, natural interpretation of getLength on DataLazy.
2045  // Instances of DataReady can look at the size of their vectors.  // Instances of DataReady can look at the size of their vectors.
2046  // For lazy though, it could be the size the data would be if it were resolved;  // For lazy though, it could be the size the data would be if it were resolved;
2047  // or it could be some function of the lengths of the DataReady instances which  // or it could be some function of the lengths of the DataReady instances which
2048  // form part of the expression.  // form part of the expression.
2049  // Rather than have people making assumptions, I have disabled the method.  // Rather than have people making assumptions, I have disabled the method.
2050  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
2051  DataLazy::getLength() const  DataLazy::getLength() const
2052  {  {
2053    throw DataException("getLength() does not make sense for lazy data.");    throw DataException("getLength() does not make sense for lazy data.");
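// A hedged caller-side sketch (a fragment assuming this library's headers; 'n' is an
// illustrative pointer to a DataLazy node, not a name from this file). Length queries have
// to be made on resolved data; the lazy node simply refuses them:
//
//   try
//   {
//       n->getLength();                  // throws DataException for lazy data
//   }
//   catch (const DataException& e)
//   {
//       // expected: obtain a DataReady (e.g. by resolving/collapsing via the owning Data
//       // object) and take the length of its vector instead.
//   }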
# Line 1318  DataLazy::getSlice(const DataTypes::Regi Line 2062  DataLazy::getSlice(const DataTypes::Regi
2062    
2063    
2064  // To do this we need to rely on our child nodes  // To do this we need to rely on our child nodes
2065  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
2066  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
2067                   int dataPointNo)                   int dataPointNo)
2068  {  {
2069    if (m_op==IDENTITY)    if (m_op==IDENTITY)
2070    {    {
2071      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
2072    }    }
2073    if (m_readytype!='E')    if (m_readytype!='E')
2074    {    {
2075      collapse();          collapse();
2076      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
2077    }    }
2078    // at this point we do not have an identity node and the expression will be Expanded    // at this point we do not have an identity node and the expression will be Expanded
2079    // so we only need to know which child to ask    // so we only need to know which child to ask
2080    if (m_left->m_readytype=='E')    if (m_left->m_readytype=='E')
2081    {    {
2082      return m_left->getPointOffset(sampleNo,dataPointNo);          return m_left->getPointOffset(sampleNo,dataPointNo);
2083    }    }
2084    else    else
2085    {    {
2086      return m_right->getPointOffset(sampleNo,dataPointNo);          return m_right->getPointOffset(sampleNo,dataPointNo);
2087    }    }
2088  }  }
2089    
2090  // To do this we need to rely on our child nodes  // To do this we need to rely on our child nodes
2091  DataTypes::ValueType::size_type  DataTypes::RealVectorType::size_type
2092  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
2093                   int dataPointNo) const                   int dataPointNo) const
2094  {  {
2095    if (m_op==IDENTITY)    if (m_op==IDENTITY)
2096    {    {
2097      return m_id->getPointOffset(sampleNo,dataPointNo);          return m_id->getPointOffset(sampleNo,dataPointNo);
2098    }    }
2099    if (m_readytype=='E')    if (m_readytype=='E')
2100    {    {
# Line 1358  DataLazy::getPointOffset(int sampleNo, Line 2102  DataLazy::getPointOffset(int sampleNo,
2102      // so we only need to know which child to ask      // so we only need to know which child to ask
2103      if (m_left->m_readytype=='E')      if (m_left->m_readytype=='E')
2104      {      {
2105      return m_left->getPointOffset(sampleNo,dataPointNo);          return m_left->getPointOffset(sampleNo,dataPointNo);
2106      }      }
2107      else      else
2108      {      {
2109      return m_right->getPointOffset(sampleNo,dataPointNo);          return m_right->getPointOffset(sampleNo,dataPointNo);
2110      }      }
2111    }    }
2112    if (m_readytype=='C')    if (m_readytype=='C')
2113    {    {
2114      return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter          return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
2115    }    }
2116    throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");    throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
2117  }  }
2118    
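The delegation rule used by both getPointOffset() overloads above can be shown with a tiny
self-contained analogue in plain C++ (none of these names come from escript; it only mirrors
the logic: an expanded node forwards the query to an expanded child, a constant node may ask
either child, and anything that would need collapsing is rejected):

    #include <cstddef>
    #include <stdexcept>

    struct ToyNode
    {
        char readytype;            // 'E', 'T' or 'C', like m_readytype
        const ToyNode* left;       // null for leaves
        const ToyNode* right;
        std::size_t leafOffset;    // stands in for the DataReady lookup

        std::size_t pointOffset() const
        {
            if (left == nullptr && right == nullptr)
            {
                return leafOffset;                 // leaf answers directly
            }
            if (readytype == 'E')
            {
                // ask the child that is itself expanded (unary nodes only have a left child)
                return (right == nullptr || left->readytype == 'E')
                       ? left->pointOffset()
                       : right->pointOffset();
            }
            if (readytype == 'C')
            {
                return left->pointOffset();        // which child doesn't matter
            }
            throw std::logic_error("tagged expression would require collapsing first");
        }
    };

    // Illustrative use:
    //   ToyNode leafE{'E', nullptr, nullptr, 42};
    //   ToyNode leafC{'C', nullptr, nullptr, 0};
    //   ToyNode op{'E', &leafE, &leafC, 0};
    //   op.pointOffset();   // == 42, taken from the expanded child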
2119  // It would seem that DataTagged will need to be treated differently since even after setting all tags  
2120  // to zero, all the tags from all the DataTags would be in the result.  // I have decided to let Data:: handle this issue.
 // However since they all have the same value (0) whether they are there or not should not matter.  
 // So I have decided that for all types this method will create a constant 0.  
 // It can be promoted up as required.  
 // A possible efficiency concern might be expanded->constant->expanded which has an extra memory management  
 // but we can deal with that if it arises.
 // but we can deal with that if it arises.
2121  void  void
2122  DataLazy::setToZero()  DataLazy::setToZero()
2123  {  {
2124    DataTypes::ValueType v(getNoValues(),0);  //   DataTypes::RealVectorType v(getNoValues(),0);
2125    m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));  //   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
2126    m_op=IDENTITY;  //   m_op=IDENTITY;
2127    m_right.reset();    //   m_right.reset();  
2128    m_left.reset();  //   m_left.reset();
2129    m_readytype='C';  //   m_readytype='C';
2130    m_buffsRequired=1;  //   m_buffsRequired=1;
2131    
2132      (void)privdebug;  // to stop the compiler complaining about unused privdebug
2133      throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");
2134  }  }
2135    
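// A hedged caller-side sketch (a fragment assuming this library's headers; 'lazyNode' is an
// illustrative name). Since lazy objects are treated as read only, mutating calls are rejected
// rather than rewriting the expression, and zeroing is left to the Data wrapper:
//
//   try
//   {
//       lazyNode->setToZero();           // always throws for DataLazy
//   }
//   catch (const DataException& e)
//   {
//       // zero the owning Data object (or a resolved copy) instead
//   }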
2136  }   // end namespace  bool
2137    DataLazy::actsExpanded() const
2138    {
2139            return (m_readytype=='E');
2140    }
2141    
2142    } // end namespace
2143    
