/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


revision 2147 by jfenwick, Wed Dec 10 04:41:26 2008 UTC → revision 3911 by jfenwick, Thu Jun 14 01:01:03 2012 UTC
# Line 1  Line 1 
1    
2  /*******************************************************  /*******************************************************
3  *  *
4  * Copyright (c) 2003-2008 by University of Queensland  * Copyright (c) 2003-2012 by University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * Earth Systems Science Computational Center (ESSCC)
6  * http://www.uq.edu.au/esscc  * http://www.uq.edu.au/esscc
7  *  *
# Line 13  Line 13 
13    
14    
15  #include "DataLazy.h"  #include "DataLazy.h"
16  #ifdef USE_NETCDF  #include "esysUtils/Esys_MPI.h"
 #include <netcdfcpp.h>  
 #endif  
 #ifdef PASO_MPI  
 #include <mpi.h>  
 #endif  
17  #ifdef _OPENMP  #ifdef _OPENMP
18  #include <omp.h>  #include <omp.h>
19  #endif  #endif
# Line 28  Line 23 
23  #include "UnaryFuncs.h"     // for escript::fsign  #include "UnaryFuncs.h"     // for escript::fsign
24  #include "Utils.h"  #include "Utils.h"
25    
26  // #define LAZYDEBUG(X) X;  #include "EscriptParams.h"
27    
28    #ifdef USE_NETCDF
29    #include <netcdfcpp.h>
30    #endif
31    
32    #include <iomanip>      // for some fancy formatting in debug
33    
34    // #define LAZYDEBUG(X) if (privdebug){X;}
35  #define LAZYDEBUG(X)  #define LAZYDEBUG(X)
36    namespace
37    {
38    bool privdebug=false;
39    
40    #define ENABLEDEBUG privdebug=true;
41    #define DISABLEDEBUG privdebug=false;
42    }
43    
44    // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}
45    
46    // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "SIZE LIMIT EXCEEDED " << m_height << endl;resolveToIdentity();}
47    
48    
49    #define SIZELIMIT if (m_height>escript::escriptParams.getTOO_MANY_LEVELS())  {if (escript::escriptParams.getLAZY_VERBOSE()){cerr << "SIZE LIMIT EXCEEDED height=" << m_height << endl;}resolveToIdentity();}
50    
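Note on the active SIZELIMIT definition above: it is expanded at the end of every operation constructor in the new revision (immediately after LazyNodeSetup(), as seen further down in this diff), so a lazy tree whose height passes escriptParams.getTOO_MANY_LEVELS() collapses itself via resolveToIdentity() as soon as it is built. The two commented-out variants additionally bounded m_children against TOO_MANY_NODES.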
51  /*  /*
52  How does DataLazy work?  How does DataLazy work?
# Line 93  namespace escript Line 110  namespace escript
110  namespace  namespace
111  {  {
112    
113    
114    // enabling this will print out when ever the maximum stacksize used by resolve increases
115    // it assumes _OPENMP is also in use
116    //#define LAZY_STACK_PROF
117    
118    
119    
120    #ifndef _OPENMP
121      #ifdef LAZY_STACK_PROF
122      #undef LAZY_STACK_PROF
123      #endif
124    #endif
125    
126    
127    #ifdef LAZY_STACK_PROF
128    std::vector<void*> stackstart(getNumberOfThreads());
129    std::vector<void*> stackend(getNumberOfThreads());
130    size_t maxstackuse=0;
131    #endif
132    
133  enum ES_opgroup  enum ES_opgroup
134  {  {
135     G_UNKNOWN,     G_UNKNOWN,
# Line 102  enum ES_opgroup Line 139  enum ES_opgroup
139     G_UNARY_P,       // pointwise operations with one argument, requiring a parameter     G_UNARY_P,       // pointwise operations with one argument, requiring a parameter
140     G_NP1OUT,        // non-pointwise op with one output     G_NP1OUT,        // non-pointwise op with one output
141     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter
142     G_TENSORPROD     // general tensor product     G_TENSORPROD,    // general tensor product
143       G_NP1OUT_2P,     // non-pointwise op with one output requiring two params
144       G_REDUCTION,     // non-pointwise unary op with a scalar output
145       G_CONDEVAL
146  };  };
147    
148    
# Line 116  string ES_opstrings[]={"UNKNOWN","IDENTI Line 156  string ES_opstrings[]={"UNKNOWN","IDENTI
156              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",
157              "symmetric","nonsymmetric",              "symmetric","nonsymmetric",
158              "prod",              "prod",
159              "transpose", "trace"};              "transpose", "trace",
160  int ES_opcount=40;              "swapaxes",
161                "minval", "maxval",
162                "condEval"};
163    int ES_opcount=44;
164  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
165              G_UNARY,G_UNARY,G_UNARY, //10              G_UNARY,G_UNARY,G_UNARY, //10
166              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17
# Line 126  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENT Line 169  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENT
169              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35
170              G_NP1OUT,G_NP1OUT,              G_NP1OUT,G_NP1OUT,
171              G_TENSORPROD,              G_TENSORPROD,
172              G_NP1OUT_P, G_NP1OUT_P};              G_NP1OUT_P, G_NP1OUT_P,
173                G_NP1OUT_2P,
174                G_REDUCTION, G_REDUCTION,
175                G_CONDEVAL};
176  inline  inline
177  ES_opgroup  ES_opgroup
178  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 171  resultShape(DataAbstract_ptr left, DataA Line 217  resultShape(DataAbstract_ptr left, DataA
217        {        {
218          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
219        }        }
220    
221        if (left->getRank()==0)   // we need to allow scalar * anything        if (left->getRank()==0)   // we need to allow scalar * anything
222        {        {
223          return right->getShape();          return right->getShape();
# Line 187  resultShape(DataAbstract_ptr left, DataA Line 234  resultShape(DataAbstract_ptr left, DataA
234  // return the shape for "op left"  // return the shape for "op left"
235    
236  DataTypes::ShapeType  DataTypes::ShapeType
237  resultShape(DataAbstract_ptr left, ES_optype op)  resultShape(DataAbstract_ptr left, ES_optype op, int axis_offset)
238  {  {
239      switch(op)      switch(op)
240      {      {
241          case TRANS:          case TRANS:
242          return left->getShape();         {            // for the scoping of variables
243            const DataTypes::ShapeType& s=left->getShape();
244            DataTypes::ShapeType sh;
245            int rank=left->getRank();
246            if (axis_offset<0 || axis_offset>rank)
247            {
248                stringstream e;
249                e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
250                throw DataException(e.str());
251            }
252            for (int i=0; i<rank; i++)
253            {
254               int index = (axis_offset+i)%rank;
255               sh.push_back(s[index]); // Append to new shape
256            }
257            return sh;
258           }
259      break;      break;
260      case TRACE:      case TRACE:
261          return DataTypes::scalarShape;         {
262            int rank=left->getRank();
263            if (rank<2)
264            {
265               throw DataException("Trace can only be computed for objects with rank 2 or greater.");
266            }
267            if ((axis_offset>rank-2) || (axis_offset<0))
268            {
269               throw DataException("Trace: axis offset must lie between 0 and rank-2 inclusive.");
270            }
271            if (rank==2)
272            {
273               return DataTypes::scalarShape;
274            }
275            else if (rank==3)
276            {
277               DataTypes::ShapeType sh;
278                   if (axis_offset==0)
279               {
280                    sh.push_back(left->getShape()[2]);
281                   }
282                   else     // offset==1
283               {
284                sh.push_back(left->getShape()[0]);
285                   }
286               return sh;
287            }
288            else if (rank==4)
289            {
290               DataTypes::ShapeType sh;
291               const DataTypes::ShapeType& s=left->getShape();
292                   if (axis_offset==0)
293               {
294                    sh.push_back(s[2]);
295                    sh.push_back(s[3]);
296                   }
297                   else if (axis_offset==1)
298               {
299                    sh.push_back(s[0]);
300                    sh.push_back(s[3]);
301                   }
302               else     // offset==2
303               {
304                sh.push_back(s[0]);
305                sh.push_back(s[1]);
306               }
307               return sh;
308            }
309            else        // unknown rank
310            {
311               throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
312            }
313           }
314      break;      break;
315          default:          default:
316      throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");      throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
317      }      }
318  }  }
319    
320    DataTypes::ShapeType
321    SwapShape(DataAbstract_ptr left, const int axis0, const int axis1)
322    {
323         // This code taken from the Data.cpp swapaxes() method
324         // Some of the checks are probably redundant here
325         int axis0_tmp,axis1_tmp;
326         const DataTypes::ShapeType& s=left->getShape();
327         DataTypes::ShapeType out_shape;
328         // Build the output shape by exchanging the entries at axis0 and axis1;
329         // every other axis keeps its original position.
330         int rank=left->getRank();
331         if (rank<2) {
332            throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
333         }
334         if (axis0<0 || axis0>rank-1) {
335            stringstream e;
336            e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
337            throw DataException(e.str());
338         }
339         if (axis1<0 || axis1>rank-1) {
340            stringstream e;
341            e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
342            throw DataException(e.str());
343         }
344         if (axis0 == axis1) {
345             throw DataException("Error - Data::swapaxes: axis indices must be different.");
346         }
347         if (axis0 > axis1) {
348             axis0_tmp=axis1;
349             axis1_tmp=axis0;
350         } else {
351             axis0_tmp=axis0;
352             axis1_tmp=axis1;
353         }
354         for (int i=0; i<rank; i++) {
355           if (i == axis0_tmp) {
356              out_shape.push_back(s[axis1_tmp]);
357           } else if (i == axis1_tmp) {
358              out_shape.push_back(s[axis0_tmp]);
359           } else {
360              out_shape.push_back(s[i]);
361           }
362         }
363        return out_shape;
364    }
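A quick worked example of SwapShape: for a rank-3 argument with shape (2,3,4), calling SwapShape(left, 0, 2) exchanges the first and last entries and returns (4,3,2); all remaining axes keep their positions.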
365    
366    
367  // determine the output shape for the general tensor product operation  // determine the output shape for the general tensor product operation
368  // the additional parameters return information required later for the product  // the additional parameters return information required later for the product
369  // the majority of this code is copy pasted from C_General_Tensor_Product  // the majority of this code is copy pasted from C_General_Tensor_Product
# Line 266  GTPShape(DataAbstract_ptr left, DataAbst Line 428  GTPShape(DataAbstract_ptr left, DataAbst
428    return shape2;    return shape2;
429  }  }
430    
   
 // determine the number of points in the result of "left op right"  
 // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here  
 // size_t  
 // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  
 // {  
 //    switch (getOpgroup(op))  
 //    {  
 //    case G_BINARY: return left->getLength();  
 //    case G_UNARY: return left->getLength();  
 //    case G_NP1OUT: return left->getLength();  
 //    default:  
 //  throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");  
 //    }  
 // }  
   
 // determine the number of samples requires to evaluate an expression combining left and right  
 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  
 // The same goes for G_TENSORPROD  
 int  
 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  
 {  
    switch(getOpgroup(op))  
    {  
    case G_IDENTITY: return 1;  
    case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_UNARY:  
    case G_UNARY_P: return max(left->getBuffsRequired(),1);  
    case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);  
    case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);  
    case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    default:  
     throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");  
    }  
 }  
   
   
431  }   // end anonymous namespace  }   // end anonymous namespace
432    
433    
# Line 318  opToString(ES_optype op) Line 443  opToString(ES_optype op)
443    return ES_opstrings[op];    return ES_opstrings[op];
444  }  }
445    
446    void DataLazy::LazyNodeSetup()
447    {
448    #ifdef _OPENMP
449        int numthreads=omp_get_max_threads();
450        m_samples.resize(numthreads*m_samplesize);
451        m_sampleids=new int[numthreads];
452        for (int i=0;i<numthreads;++i)
453        {
454            m_sampleids[i]=-1;  
455        }
456    #else
457        m_samples.resize(m_samplesize);
458        m_sampleids=new int[1];
459        m_sampleids[0]=-1;
460    #endif  // _OPENMP
461    }
462    
463    
464    // Creates an identity node
465  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
466      : parent(p->getFunctionSpace(),p->getShape()),      : parent(p->getFunctionSpace(),p->getShape())
467      m_op(IDENTITY),      ,m_sampleids(0),
468      m_axis_offset(0),      m_samples(1)
     m_transpose(0),  
     m_SL(0), m_SM(0), m_SR(0)  
469  {  {
470     if (p->isLazy())     if (p->isLazy())
471     {     {
# Line 335  DataLazy::DataLazy(DataAbstract_ptr p) Line 476  DataLazy::DataLazy(DataAbstract_ptr p)
476     }     }
477     else     else
478     {     {
479      m_id=dynamic_pointer_cast<DataReady>(p);      p->makeLazyShared();
480      if(p->isConstant()) {m_readytype='C';}      DataReady_ptr dr=dynamic_pointer_cast<DataReady>(p);
481      else if(p->isExpanded()) {m_readytype='E';}      makeIdentity(dr);
482      else if (p->isTagged()) {m_readytype='T';}  LAZYDEBUG(cout << "Wrapping " << dr.get() << " id=" << m_id.get() << endl;)
     else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}  
483     }     }
    m_buffsRequired=1;  
    m_samplesize=getNumDPPSample()*getNoValues();  
    m_maxsamplesize=m_samplesize;  
484  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
485  }  }
486    
   
   
   
487  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
488      : parent(left->getFunctionSpace(),left->getShape()),      : parent(left->getFunctionSpace(),(getOpgroup(op)!=G_REDUCTION)?left->getShape():DataTypes::scalarShape),
489      m_op(op),      m_op(op),
490      m_axis_offset(0),      m_axis_offset(0),
491      m_transpose(0),      m_transpose(0),
492      m_SL(0), m_SM(0), m_SR(0)      m_SL(0), m_SM(0), m_SR(0)
493  {  {
494     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT) && (getOpgroup(op)!=G_REDUCTION))
495     {     {
496      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
497     }     }
# Line 373  DataLazy::DataLazy(DataAbstract_ptr left Line 507  DataLazy::DataLazy(DataAbstract_ptr left
507     }     }
508     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
509     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
510     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
511     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
512       m_height=m_left->m_height+1;
513       LazyNodeSetup();
514       SIZELIMIT
515  }  }
516    
517    
# Line 385  DataLazy::DataLazy(DataAbstract_ptr left Line 521  DataLazy::DataLazy(DataAbstract_ptr left
521      m_op(op),      m_op(op),
522      m_SL(0), m_SM(0), m_SR(0)      m_SL(0), m_SM(0), m_SR(0)
523  {  {
524    LAZYDEBUG(cout << "Forming operator with " << left.get() << " " << right.get() << endl;)
525     if ((getOpgroup(op)!=G_BINARY))     if ((getOpgroup(op)!=G_BINARY))
526     {     {
527      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
# Line 401  DataLazy::DataLazy(DataAbstract_ptr left Line 538  DataLazy::DataLazy(DataAbstract_ptr left
538     {     {
539      Data tmp(Data(right),getFunctionSpace());      Data tmp(Data(right),getFunctionSpace());
540      right=tmp.borrowDataPtr();      right=tmp.borrowDataPtr();
541    LAZYDEBUG(cout << "Right interpolation required " << right.get() << endl;)
542     }     }
543     left->operandCheck(*right);     left->operandCheck(*right);
544    
545     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required
546     {     {
547      m_left=dynamic_pointer_cast<DataLazy>(left);      m_left=dynamic_pointer_cast<DataLazy>(left);
548    LAZYDEBUG(cout << "Left is " << m_left->toString() << endl;)
549     }     }
550     else     else
551     {     {
552      m_left=DataLazy_ptr(new DataLazy(left));      m_left=DataLazy_ptr(new DataLazy(left));
553    LAZYDEBUG(cout << "Left " << left.get() << " wrapped " << m_left->m_id.get() << endl;)
554     }     }
555     if (right->isLazy())     if (right->isLazy())
556     {     {
557      m_right=dynamic_pointer_cast<DataLazy>(right);      m_right=dynamic_pointer_cast<DataLazy>(right);
558    LAZYDEBUG(cout << "Right is " << m_right->toString() << endl;)
559     }     }
560     else     else
561     {     {
562      m_right=DataLazy_ptr(new DataLazy(right));      m_right=DataLazy_ptr(new DataLazy(right));
563    LAZYDEBUG(cout << "Right " << right.get() << " wrapped " << m_right->m_id.get() << endl;)
564     }     }
565     char lt=m_left->m_readytype;     char lt=m_left->m_readytype;
566     char rt=m_right->m_readytype;     char rt=m_right->m_readytype;
# Line 435  DataLazy::DataLazy(DataAbstract_ptr left Line 577  DataLazy::DataLazy(DataAbstract_ptr left
577      m_readytype='C';      m_readytype='C';
578     }     }
579     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
580     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_children=m_left->m_children+m_right->m_children+2;
581     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_height=max(m_left->m_height,m_right->m_height)+1;
582       LazyNodeSetup();
583       SIZELIMIT
584  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
585  }  }
586    
# Line 466  DataLazy::DataLazy(DataAbstract_ptr left Line 610  DataLazy::DataLazy(DataAbstract_ptr left
610      Data tmp(Data(right),getFunctionSpace());      Data tmp(Data(right),getFunctionSpace());
611      right=tmp.borrowDataPtr();      right=tmp.borrowDataPtr();
612     }     }
613     left->operandCheck(*right);  //    left->operandCheck(*right);
614    
615     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required
616     {     {
# Line 499  DataLazy::DataLazy(DataAbstract_ptr left Line 643  DataLazy::DataLazy(DataAbstract_ptr left
643      m_readytype='C';      m_readytype='C';
644     }     }
645     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
646     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_children=m_left->m_children+m_right->m_children+2;
647     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_height=max(m_left->m_height,m_right->m_height)+1;
648       LazyNodeSetup();
649       SIZELIMIT
650  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
651  }  }
652    
653    
654  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
655      : parent(left->getFunctionSpace(), resultShape(left,op)),      : parent(left->getFunctionSpace(), resultShape(left,op, axis_offset)),
656      m_op(op),      m_op(op),
657      m_axis_offset(axis_offset),      m_axis_offset(axis_offset),
658      m_transpose(0),      m_transpose(0),
# Line 527  DataLazy::DataLazy(DataAbstract_ptr left Line 673  DataLazy::DataLazy(DataAbstract_ptr left
673     }     }
674     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
675     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
676     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
677     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
678       m_height=m_left->m_height+1;
679       LazyNodeSetup();
680       SIZELIMIT
681  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
682  }  }
683    
# Line 555  DataLazy::DataLazy(DataAbstract_ptr left Line 703  DataLazy::DataLazy(DataAbstract_ptr left
703     }     }
704     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
705     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
706     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
707     m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());     m_children=m_left->m_children+1;
708       m_height=m_left->m_height+1;
709       LazyNodeSetup();
710       SIZELIMIT
711  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
712  }  }
713    
714  DataLazy::~DataLazy()  
715    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, const int axis0, const int axis1)
716        : parent(left->getFunctionSpace(), SwapShape(left,axis0,axis1)),
717        m_op(op),
718        m_axis_offset(axis0),
719        m_transpose(axis1),
720        m_tol(0)
721  {  {
722       if ((getOpgroup(op)!=G_NP1OUT_2P))
723       {
724      throw DataException("Programmer error - constructor DataLazy(left, op, axis0, axis1) will only process operations which require two integer parameters.");
725       }
726       DataLazy_ptr lleft;
727       if (!left->isLazy())
728       {
729        lleft=DataLazy_ptr(new DataLazy(left));
730       }
731       else
732       {
733        lleft=dynamic_pointer_cast<DataLazy>(left);
734       }
735       m_readytype=lleft->m_readytype;
736       m_left=lleft;
737       m_samplesize=getNumDPPSample()*getNoValues();
738       m_children=m_left->m_children+1;
739       m_height=m_left->m_height+1;
740       LazyNodeSetup();
741       SIZELIMIT
742    LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)
743  }  }
744    
745    
746  int  namespace
 DataLazy::getBuffsRequired() const  
747  {  {
748      return m_buffsRequired;  
749        inline int max3(int a, int b, int c)
750        {
751        int t=(a>b?a:b);
752        return (t>c?t:c);
753    
754        }
755  }  }
756    
757    DataLazy::DataLazy(DataAbstract_ptr mask, DataAbstract_ptr left, DataAbstract_ptr right/*, double tol*/)
758        : parent(left->getFunctionSpace(), left->getShape()),
759        m_op(CONDEVAL),
760        m_axis_offset(0),
761        m_transpose(0),
762        m_tol(0)
763    {
764    
765  size_t     DataLazy_ptr lmask;
766  DataLazy::getMaxSampleSize() const     DataLazy_ptr lleft;
767       DataLazy_ptr lright;
768       if (!mask->isLazy())
769       {
770        lmask=DataLazy_ptr(new DataLazy(mask));
771       }
772       else
773       {
774        lmask=dynamic_pointer_cast<DataLazy>(mask);
775       }
776       if (!left->isLazy())
777       {
778        lleft=DataLazy_ptr(new DataLazy(left));
779       }
780       else
781       {
782        lleft=dynamic_pointer_cast<DataLazy>(left);
783       }
784       if (!right->isLazy())
785       {
786        lright=DataLazy_ptr(new DataLazy(right));
787       }
788       else
789       {
790        lright=dynamic_pointer_cast<DataLazy>(right);
791       }
792       m_readytype=lmask->m_readytype;
793       if ((lleft->m_readytype!=lright->m_readytype) || (lmask->m_readytype!=lleft->m_readytype))
794       {
795        throw DataException("Programmer Error - condEval arguments must have the same readytype");
796       }
797       m_left=lleft;
798       m_right=lright;
799       m_mask=lmask;
800       m_samplesize=getNumDPPSample()*getNoValues();
801       m_children=m_left->m_children+m_right->m_children+m_mask->m_children+1;
802       m_height=max3(m_left->m_height,m_right->m_height,m_mask->m_height)+1;
803       LazyNodeSetup();
804       SIZELIMIT
805    LAZYDEBUG(cout << "(8)Lazy created with " << m_samplesize << endl;)
806    }
807    
808    
809    
810    DataLazy::~DataLazy()
811  {  {
812      return m_maxsamplesize;     delete[] m_sampleids;
813  }  }
814    
815    
816  /*  /*
817    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
818    This does the work for the collapse method.    This does the work for the collapse method.
# Line 718  DataLazy::collapseToReady() Line 952  DataLazy::collapseToReady()
952      case TRACE:      case TRACE:
953      result=left.trace(m_axis_offset);      result=left.trace(m_axis_offset);
954      break;      break;
955        case SWAP:
956        result=left.swapaxes(m_axis_offset, m_transpose);
957        break;
958        case MINVAL:
959        result=left.minval();
960        break;
961        case MAXVAL:
962      result=left.maxval();
963        break;
964      default:      default:
965      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
966    }    }
# Line 745  DataLazy::collapse() Line 988  DataLazy::collapse()
988    m_op=IDENTITY;    m_op=IDENTITY;
989  }  }
990    
991  /*  
992    \brief Compute the value of the expression (unary operation) for the given sample.  
993    \return Vector which stores the value of the subexpression for the given sample.  
994    \param v A vector to store intermediate results.  
995    \param offset Index in v to begin storing results.  
996    \param sampleNo Sample number to evaluate.  #define PROC_OP(TYPE,X)                               \
997    \param roffset (output parameter) the offset in the return vector where the result begins.      for (int j=0;j<onumsteps;++j)\
998        {\
999    The return value will be an existing vector so do not deallocate it.        for (int i=0;i<numsteps;++i,resultp+=resultStep) \
1000    If the result is stored in v it should be stored at the offset given.        { \
1001    Everything from offset to the end of v should be considered available for this method to use.  LAZYDEBUG(cout << "[left,right]=[" << lroffset << "," << rroffset << "]" << endl;)\
1002  */  LAZYDEBUG(cout << "{left,right}={" << (*left)[lroffset] << "," << (*right)[rroffset] << "}\n";)\
1003  DataTypes::ValueType*           tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
1004  DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  LAZYDEBUG(cout << " result=      " << resultp[0] << endl;) \
1005             lroffset+=leftstep; \
1006             rroffset+=rightstep; \
1007          }\
1008          lroffset+=oleftstep;\
1009          rroffset+=orightstep;\
1010        }
1011    
1012    
1013    // The result will be stored in m_samples
1014    // The return value is a pointer to the DataVector, offset is the offset within the return value
1015    const DataTypes::ValueType*
1016    DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset)
1017    {
1018    LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
1019        // collapse so we have a 'E' node or an IDENTITY for some other type
1020      if (m_readytype!='E' && m_op!=IDENTITY)
1021      {
1022        collapse();
1023      }
1024      if (m_op==IDENTITY)  
1025      {
1026        const ValueType& vec=m_id->getVectorRO();
1027        roffset=m_id->getPointOffset(sampleNo, 0);
1028    #ifdef LAZY_STACK_PROF
1029    int x;
1030    if (&x<stackend[omp_get_thread_num()])
1031    {
1032           stackend[omp_get_thread_num()]=&x;
1033    }
1034    #endif
1035        return &(vec);
1036      }
1037      if (m_readytype!='E')
1038      {
1039        throw DataException("Programmer Error - Collapse did not produce an expanded node.");
1040      }
1041      if (m_sampleids[tid]==sampleNo)
1042      {
1043        roffset=tid*m_samplesize;
1044        return &(m_samples);        // sample is already resolved
1045      }
1046      m_sampleids[tid]=sampleNo;
1047    
1048      switch (getOpgroup(m_op))
1049      {
1050      case G_UNARY:
1051      case G_UNARY_P: return resolveNodeUnary(tid, sampleNo, roffset);
1052      case G_BINARY: return resolveNodeBinary(tid, sampleNo, roffset);
1053      case G_NP1OUT: return resolveNodeNP1OUT(tid, sampleNo, roffset);
1054      case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset);
1055      case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);
1056      case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);
1057      case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);
1058      case G_CONDEVAL: return resolveNodeCondEval(tid, sampleNo, roffset);
1059      default:
1060        throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1061      }
1062    }
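The loop that actually drives resolveNodeSample() over all samples lies outside this hunk. Below is a minimal sketch of how a caller is expected to use this per-thread interface; lazyRoot, totalSamples and the final copy are illustrative placeholders, not names taken from the source:

    // illustrative sketch only - the real driver inside DataLazy is not shown in this diff
    #pragma omp parallel for
    for (int sampleNo = 0; sampleNo < totalSamples; ++sampleNo)
    {
        int tid = omp_get_thread_num();      // selects this thread's slot in m_samples
        size_t roffset = 0;
        const DataTypes::ValueType* res = lazyRoot->resolveNodeSample(tid, sampleNo, roffset);
        // copy m_samplesize doubles starting at (*res)[roffset] into the expanded result
    }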
1063    
1064    const DataTypes::ValueType*
1065    DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset)
1066  {  {
1067      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1068      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
1069      // processing single points.      // processing single points.
1070        // we will also know we won't get identity nodes
1071    if (m_readytype!='E')    if (m_readytype!='E')
1072    {    {
1073      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");      throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
1074    }    }
1075    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);    if (m_op==IDENTITY)
1076    const double* left=&((*vleft)[roffset]);    {
1077    double* result=&(v[offset]);      throw DataException("Programmer error - resolveNodeUnary should not be called on identity nodes.");
1078    roffset=offset;    }
1079      const DataTypes::ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, roffset);
1080      const double* left=&((*leftres)[roffset]);
1081      roffset=m_samplesize*tid;
1082      double* result=&(m_samples[roffset]);
1083    switch (m_op)    switch (m_op)
1084    {    {
1085      case SIN:        case SIN:  
# Line 880  DataLazy::resolveUnary(ValueType& v, siz Line 1189  DataLazy::resolveUnary(ValueType& v, siz
1189      default:      default:
1190      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
1191    }    }
1192    return &v;    return &(m_samples);
1193  }  }
1194    
1195    
1196    const DataTypes::ValueType*
1197    DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset)
1198    {
1199        // we assume that any collapsing has been done before we get here
1200        // since we only have one argument we don't need to think about only
1201        // processing single points.
1202        // we will also know we won't get identity nodes
1203      if (m_readytype!='E')
1204      {
1205      throw DataException("Programmer error - resolveNodeReduction should only be called on expanded Data.");
1206      }
1207      if (m_op==IDENTITY)
1208      {
1209      throw DataException("Programmer error - resolveNodeReduction should not be called on identity nodes.");
1210      }
1211      size_t loffset=0;
1212      const DataTypes::ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, loffset);
1213    
1214      roffset=m_samplesize*tid;
1215      unsigned int ndpps=getNumDPPSample();
1216      unsigned int psize=DataTypes::noValues(getShape());
1217      double* result=&(m_samples[roffset]);
1218      switch (m_op)
1219      {
1220        case MINVAL:
1221        {
1222          for (unsigned int z=0;z<ndpps;++z)
1223          {
1224            FMin op;
1225            *result=DataMaths::reductionOp(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max());
1226            loffset+=psize;
1227            result++;
1228          }
1229        }
1230        break;
1231        case MAXVAL:
1232        {
1233          for (unsigned int z=0;z<ndpps;++z)
1234          {
1235          FMax op;
1236          *result=DataMaths::reductionOp(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max()*-1);
1237          loffset+=psize;
1238          result++;
1239          }
1240        }
1241        break;
1242        default:
1243      throw DataException("Programmer error - resolveNodeReduction can not resolve operator "+opToString(m_op)+".");
1244      }
1245      return &(m_samples);
1246    }
1247    
1248    const DataTypes::ValueType*
1249    DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset)
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
1250  {  {
1251      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1252      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
1253      // processing single points.      // processing single points.
1254    if (m_readytype!='E')    if (m_readytype!='E')
1255    {    {
1256      throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT should only be called on expanded Data.");
1257    }    }
1258      // since we can't write the result over the input, we need a result offset further along    if (m_op==IDENTITY)
1259    size_t subroffset=roffset+m_samplesize;    {
1260    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);      throw DataException("Programmer error - resolveNodeNP1OUT should not be called on identity nodes.");
1261    roffset=offset;    }
1262      size_t subroffset;
1263      const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1264      roffset=m_samplesize*tid;
1265      size_t loop=0;
1266      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1267      size_t step=getNoValues();
1268      size_t offset=roffset;
1269    switch (m_op)    switch (m_op)
1270    {    {
1271      case SYM:      case SYM:
1272      DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);      for (loop=0;loop<numsteps;++loop)
1273        {
1274            DataMaths::symmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1275            subroffset+=step;
1276            offset+=step;
1277        }
1278      break;      break;
1279      case NSYM:      case NSYM:
1280      DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);      for (loop=0;loop<numsteps;++loop)
1281        {
1282            DataMaths::nonsymmetric(*leftres,m_left->getShape(),subroffset, m_samples, getShape(), offset);
1283            subroffset+=step;
1284            offset+=step;
1285        }
1286      break;      break;
1287      default:      default:
1288      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
1289    }    }
1290    return &v;    return &m_samples;
1291  }  }
1292    
1293  /*  const DataTypes::ValueType*
1294    \brief Compute the value of the expression (unary operation) for the given sample.  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset)
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
1295  {  {
1296      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1297      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
1298      // processing single points.      // processing single points.
1299    if (m_readytype!='E')    if (m_readytype!='E')
1300    {    {
1301      throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");      throw DataException("Programmer error - resolveNodeNP1OUT_P should only be called on expanded Data.");
1302    }    }
1303      // since we can't write the result over the input, we need a result offset further along    if (m_op==IDENTITY)
1304    size_t subroffset=roffset+m_samplesize;    {
1305    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);      throw DataException("Programmer error - resolveNodeNP1OUT_P should not be called on identity nodes.");
1306    roffset=offset;    }
1307      size_t subroffset;
1308      size_t offset;
1309      const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1310      roffset=m_samplesize*tid;
1311      offset=roffset;
1312      size_t loop=0;
1313      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1314      size_t outstep=getNoValues();
1315      size_t instep=m_left->getNoValues();
1316    switch (m_op)    switch (m_op)
1317    {    {
1318      case TRACE:      case TRACE:
1319           DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);      for (loop=0;loop<numsteps;++loop)
1320        {
1321                DataMaths::trace(*leftres,m_left->getShape(),subroffset, m_samples ,getShape(),offset,m_axis_offset);
1322            subroffset+=instep;
1323            offset+=outstep;
1324        }
1325      break;      break;
1326      case TRANS:      case TRANS:
1327           DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);      for (loop=0;loop<numsteps;++loop)
1328        {
1329                DataMaths::transpose(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset,m_axis_offset);
1330            subroffset+=instep;
1331            offset+=outstep;
1332        }
1333      break;      break;
1334      default:      default:
1335      throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
1336    }    }
1337    return &v;    return &m_samples;
1338  }  }
1339    
1340    
1341  #define PROC_OP(TYPE,X)                               \  const DataTypes::ValueType*
1342      for (int i=0;i<steps;++i,resultp+=resultStep) \  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset)
1343      { \  {
1344         tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \    if (m_readytype!='E')
1345         lroffset+=leftStep; \    {
1346         rroffset+=rightStep; \      throw DataException("Programmer error - resolveNodeNP1OUT_2P should only be called on expanded Data.");
1347      }
1348      if (m_op==IDENTITY)
1349      {
1350        throw DataException("Programmer error - resolveNodeNP1OUT_2P should not be called on identity nodes.");
1351      }
1352      size_t subroffset;
1353      size_t offset;
1354      const ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1355      roffset=m_samplesize*tid;
1356      offset=roffset;
1357      size_t loop=0;
1358      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
1359      size_t outstep=getNoValues();
1360      size_t instep=m_left->getNoValues();
1361      switch (m_op)
1362      {
1363        case SWAP:
1364        for (loop=0;loop<numsteps;++loop)
1365        {
1366                DataMaths::swapaxes(*leftres,m_left->getShape(),subroffset, m_samples, getShape(),offset, m_axis_offset, m_transpose);
1367            subroffset+=instep;
1368            offset+=outstep;
1369      }      }
1370        break;
1371        default:
1372        throw DataException("Programmer error - resolveNodeNP1OUT2P can not resolve operator "+opToString(m_op)+".");
1373      }
1374      return &m_samples;
1375    }
1376    
1377    const DataTypes::ValueType*
1378    DataLazy::resolveNodeCondEval(int tid, int sampleNo, size_t& roffset)
1379    {
1380      if (m_readytype!='E')
1381      {
1382        throw DataException("Programmer error - resolveNodeCondEval should only be called on expanded Data.");
1383      }
1384      if (m_op!=CONDEVAL)
1385      {
1386        throw DataException("Programmer error - resolveNodeCondEval should only be called on CONDEVAL nodes.");
1387      }
1388      size_t subroffset;
1389    
1390      const ValueType* maskres=m_mask->resolveNodeSample(tid, sampleNo, subroffset);
1391      const ValueType* srcres=0;
1392      if ((*maskres)[subroffset]>0)
1393      {
1394        srcres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1395      }
1396      else
1397      {
1398        srcres=m_right->resolveNodeSample(tid, sampleNo, subroffset);
1399      }
1400    
1401      // Now we need to copy the result
1402    
1403      roffset=m_samplesize*tid;
1404      for (int i=0;i<m_samplesize;++i)
1405      {
1406        m_samples[roffset+i]=(*srcres)[subroffset+i];  
1407      }
1408    
1409      return &m_samples;
1410    }
1411    
 /*  
   \brief Compute the value of the expression (binary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
1412  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1413  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1414  // If both children are expanded, then we can process them in a single operation (we treat  // If both children are expanded, then we can process them in a single operation (we treat
# Line 998  DataLazy::resolveNP1OUT_P(ValueType& v, Line 1418  DataLazy::resolveNP1OUT_P(ValueType& v,
1418  // There is an additional complication when scalar operations are considered.  // There is an additional complication when scalar operations are considered.
1419  // For example, 2+Vector.  // For example, 2+Vector.
1420  // In this case each double within the point is treated individually  // In this case each double within the point is treated individually
1421  DataTypes::ValueType*  const DataTypes::ValueType*
1422  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset)
1423  {  {
1424  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1425    
# Line 1013  LAZYDEBUG(cout << "Resolve binary: " << Line 1433  LAZYDEBUG(cout << "Resolve binary: " <<
1433    }    }
1434    bool leftScalar=(m_left->getRank()==0);    bool leftScalar=(m_left->getRank()==0);
1435    bool rightScalar=(m_right->getRank()==0);    bool rightScalar=(m_right->getRank()==0);
1436    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1437    int steps=(bigloops?1:getNumDPPSample());    {
1438    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint      throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1439    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops    }
1440    {    size_t leftsize=m_left->getNoValues();
1441      if (!leftScalar && !rightScalar)    size_t rightsize=m_right->getNoValues();
1442      {    size_t chunksize=1;           // how many doubles will be processed in one go
1443         throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");    int leftstep=0;       // how far should the left offset advance after each step
1444      }    int rightstep=0;
1445      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());    int numsteps=0;       // total number of steps for the inner loop
1446      chunksize=1;    // for scalar    int oleftstep=0;  // the o variables refer to the outer loop
1447    }        int orightstep=0; // The outer loop is only required in cases where there is an extended scalar
1448    int leftStep=((leftExp && (!rightExp || rightScalar))? m_right->getNoValues() : 0);    int onumsteps=1;
1449    int rightStep=((rightExp && (!leftExp || leftScalar))? m_left->getNoValues() : 0);    
1450    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    bool LES=(leftExp && leftScalar); // Left is an expanded scalar
1451      bool RES=(rightExp && rightScalar);
1452      bool LS=(!leftExp && leftScalar); // left is a single scalar
1453      bool RS=(!rightExp && rightScalar);
1454      bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1455      bool RN=(!rightExp && !rightScalar);
1456      bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1457      bool REN=(rightExp && !rightScalar);
1458    
1459      if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars
1460      {
1461        chunksize=m_left->getNumDPPSample()*leftsize;
1462        leftstep=0;
1463        rightstep=0;
1464        numsteps=1;
1465      }
1466      else if (LES || RES)
1467      {
1468        chunksize=1;
1469        if (LES)        // left is an expanded scalar
1470        {
1471            if (RS)
1472            {
1473               leftstep=1;
1474               rightstep=0;
1475               numsteps=m_left->getNumDPPSample();
1476            }
1477            else        // RN or REN
1478            {
1479               leftstep=0;
1480               oleftstep=1;
1481               rightstep=1;
1482               orightstep=(RN ? -(int)rightsize : 0);
1483               numsteps=rightsize;
1484               onumsteps=m_left->getNumDPPSample();
1485            }
1486        }
1487        else        // right is an expanded scalar
1488        {
1489            if (LS)
1490            {
1491               rightstep=1;
1492               leftstep=0;
1493               numsteps=m_right->getNumDPPSample();
1494            }
1495            else
1496            {
1497               rightstep=0;
1498               orightstep=1;
1499               leftstep=1;
1500               oleftstep=(LN ? -(int)leftsize : 0);
1501               numsteps=leftsize;
1502               onumsteps=m_right->getNumDPPSample();
1503            }
1504        }
1505      }
1506      else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1507      {
1508        if (LEN)    // and Right will be a single value
1509        {
1510            chunksize=rightsize;
1511            leftstep=rightsize;
1512            rightstep=0;
1513            numsteps=m_left->getNumDPPSample();
1514            if (RS)
1515            {
1516               numsteps*=leftsize;
1517            }
1518        }
1519        else    // REN
1520        {
1521            chunksize=leftsize;
1522            rightstep=leftsize;
1523            leftstep=0;
1524            numsteps=m_right->getNumDPPSample();
1525            if (LS)
1526            {
1527               numsteps*=rightsize;
1528            }
1529        }
1530      }
1531    
1532      int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0
1533      // Get the values of sub-expressions      // Get the values of sub-expressions
1534    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    const ValueType* left=m_left->resolveNodeSample(tid,sampleNo,lroffset);  
1535    const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note    const ValueType* right=m_right->resolveNodeSample(tid,sampleNo,rroffset);
1536      // the right child starts further along.  LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1537    double* resultp=&(v[offset]);     // results are stored at the vector offset we received  LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1538    LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
1539    LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)
1540    LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)
1541    LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)
1542    LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)
1543    
1544    LAZYDEBUG(cout << "Left res["<< lroffset<< "]=" << (*left)[lroffset] << endl;)
1545    LAZYDEBUG(cout << "Right res["<< rroffset<< "]=" << (*right)[rroffset] << endl;)
1546    
1547    
1548      roffset=m_samplesize*tid;
1549      double* resultp=&(m_samples[roffset]);        // results are stored at the vector offset we received
1550    switch(m_op)    switch(m_op)
1551    {    {
1552      case ADD:      case ADD:
# Line 1053  LAZYDEBUG(cout << "Resolve binary: " << Line 1567  LAZYDEBUG(cout << "Resolve binary: " <<
1567      default:      default:
1568      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
1569    }    }
1570    roffset=offset;    LAZYDEBUG(cout << "Result res[" << roffset<< "]" << m_samples[roffset] << endl;)
1571    return &v;    return &m_samples;
1572  }  }
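To make the stride bookkeeping above concrete: suppose the left child is an expanded scalar with 4 data points per sample (LES) and the right child is a constant, non-expanded vector of 3 values (RN). The setup then picks chunksize=1, leftstep=0, oleftstep=1, rightstep=1, orightstep=-3, numsteps=3 and onumsteps=4, so PROC_OP runs 4 outer passes of 3 single-double operations each: within a pass the right offset walks across the one constant data point while the left scalar stays fixed, and between passes the left offset advances to the next scalar while the right offset rewinds by 3, producing the 12 doubles of the expanded result sample.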

// This method assumes that any subexpressions which evaluate to Constant or Tagged Data
// have already been collapsed to IDENTITY. So we must have at least one expanded child.
// Unlike the other resolve helpers, we must treat these datapoints separately.
const DataTypes::ValueType*
DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset)
{
LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)

# Line 1083 ... Line 1585

  bool leftExp=(m_left->m_readytype=='E');
  bool rightExp=(m_right->m_readytype=='E');
  int steps=getNumDPPSample();
  int leftStep=(leftExp? m_left->getNoValues() : 0);        // do not have scalars as input to this method
  int rightStep=(rightExp? m_right->getNoValues() : 0);

  int resultStep=getNoValues();
  roffset=m_samplesize*tid;
  size_t offset=roffset;

  const ValueType* left=m_left->resolveNodeSample(tid, sampleNo, lroffset);

  const ValueType* right=m_right->resolveNodeSample(tid, sampleNo, rroffset);

LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" << DataTypes::shapeToString(getShape()) << endl;
cout << getNoValues() << endl;)

LAZYDEBUG(cerr << "Post sub calls: " << toString() << endl;)
LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)
LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " RightR=" << m_right->getRank() << endl;)
LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)
LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)
LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() << "." << endl;)

  double* resultp=&(m_samples[offset]);         // results are stored at the per-thread offset we received
  switch(m_op)
  {
    case PROD:

# Line 1097 ... Line 1616

    {
          const double *ptr_0 = &((*left)[lroffset]);
          const double *ptr_1 = &((*right)[rroffset]);

LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)

          matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);

      lroffset+=leftStep;
      rroffset+=rightStep;
    }

# Line 1106 ... Line 1630

    default:
      throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
  }
  roffset=offset;
  return &m_samples;
}
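// [Editor's note: sketch of the stepping logic above.  The per-datapoint loop itself falls
//  inside the lines the diff omits, so its exact form is an assumption.]
// An operand that is not expanded gets a step of 0: its single datapoint is reused against
// every datapoint of the expanded operand, roughly:
//
//   for (int i=0; i<steps; ++i)
//   {
//       const double* ptr_0 = &((*left)[lroffset]);    // left datapoint for this step
//       const double* ptr_1 = &((*right)[rroffset]);   // right datapoint for this step
//       matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
//       lroffset += leftStep;      // 0 if the left child is constant/tagged
//       rroffset += rightStep;     // 0 if the right child is constant/tagged
//       resultp  += resultStep;    // one output datapoint (getNoValues()) per step
//   }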

const DataTypes::ValueType*
DataLazy::resolveSample(int sampleNo, size_t& roffset)
{
#ifdef _OPENMP
    int tid=omp_get_thread_num();
#else
    int tid=0;
#endif

#ifdef LAZY_STACK_PROF
    stackstart[tid]=&tid;
    stackend[tid]=&tid;
    const DataTypes::ValueType* r=resolveNodeSample(tid, sampleNo, roffset);
    size_t d=(size_t)stackstart[tid]-(size_t)stackend[tid];
    #pragma omp critical
    if (d>maxstackuse)
    {
        cout << "Max resolve Stack use " << d << endl;
        maxstackuse=d;
    }
    return r;
#else
    return resolveNodeSample(tid, sampleNo, roffset);
#endif
}
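// [Editor's note: minimal standalone illustration of the LAZY_STACK_PROF trick used above.
//  It assumes the deeper recursive resolveNode* calls overwrite stackend[tid] with the
//  address of one of their own locals (that code is outside this excerpt).  Comparing
//  addresses of locals in different frames is not strictly portable C++; it is a
//  diagnostic aid only.]
//
//   void deepest_marker(int depth, void** deepest)
//   {
//       char marker;                        // lives in the current stack frame
//       *deepest = &marker;                 // record how far the stack has grown
//       if (depth > 0) deepest_marker(depth-1, deepest);
//   }
//
//   char top;
//   void* deepest = &top;
//   deepest_marker(20, &deepest);
//   size_t used = (size_t)&top - (size_t)deepest;   // stacks grow downwards on most ABIs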

// This needs to do the work of the identity constructor
void
DataLazy::resolveToIdentity()
{
   if (m_op==IDENTITY)
      return;
   DataReady_ptr p=resolveNodeWorker();
   makeIdentity(p);
}

void DataLazy::makeIdentity(const DataReady_ptr& p)
{
   m_op=IDENTITY;
   m_axis_offset=0;
   m_transpose=0;
   m_SL=m_SM=m_SR=0;
   m_children=m_height=0;
   m_id=p;
   if (p->isConstant()) {m_readytype='C';}
   else if (p->isExpanded()) {m_readytype='E';}
   else if (p->isTagged()) {m_readytype='T';}
   else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}
   m_samplesize=p->getNumDPPSample()*p->getNoValues();
   m_left.reset();
   m_right.reset();
}

DataReady_ptr
DataLazy::resolve()
{
    resolveToIdentity();
    return m_id;
}

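// [Editor's note: hypothetical usage sketch, not part of the diff.  'lazy' stands for any
//  DataLazy node built elsewhere.]
// resolve() is now just "force to IDENTITY, then hand back the stored DataReady", so
// repeated calls are cheap: after the first one the node holds the result directly.
//
//   DataReady_ptr ready = lazy->resolve();   // first call does the parallel evaluation
//   DataReady_ptr again = lazy->resolve();   // later calls simply return the same m_id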
/* This is really a static method but I think that caused problems in windows */
void
DataLazy::resolveGroupWorker(std::vector<DataLazy*>& dats)
{
    if (dats.empty())
    {
        return;
    }
    vector<DataLazy*> work;
    FunctionSpace fs=dats[0]->getFunctionSpace();
    bool match=true;
    for (int i=dats.size()-1;i>=0;--i)
    {
        if (dats[i]->m_readytype!='E')
        {
            dats[i]->collapse();
        }
        if (dats[i]->m_op!=IDENTITY)
        {
            work.push_back(dats[i]);
            if (fs!=dats[i]->getFunctionSpace())
            {
                match=false;
            }
        }
    }
    if (work.empty())
    {
        return;     // no work to do
    }
    if (match)    // all functionspaces match.  Yes I realise this is overly strict;
    {             // it is possible that dats[0] is one of the objects which we discarded and
                  // all the other functionspaces match.
        vector<DataExpanded*> dep;
        vector<ValueType*> vecs;
        for (int i=0;i<work.size();++i)
        {
            dep.push_back(new DataExpanded(fs,work[i]->getShape(), ValueType(work[i]->getNoValues())));
            vecs.push_back(&(dep[i]->getVectorRW()));
        }
        int totalsamples=work[0]->getNumSamples();
        const ValueType* res=0; // Storage for answer
        int sample;
        #pragma omp parallel private(sample, res)
        {
            size_t roffset=0;
            #pragma omp for schedule(static)
            for (sample=0;sample<totalsamples;++sample)
            {
                roffset=0;
                int j;
                for (j=work.size()-1;j>=0;--j)
                {
#ifdef _OPENMP
                    res=work[j]->resolveNodeSample(omp_get_thread_num(),sample,roffset);
#else
                    res=work[j]->resolveNodeSample(0,sample,roffset);
#endif
                    DataVector::size_type outoffset=dep[j]->getPointOffset(sample,0);
                    memcpy(&((*vecs[j])[outoffset]),&((*res)[roffset]),work[j]->m_samplesize*sizeof(DataVector::ElementType));
                }
            }
        }
        // Now we need to load the new results as identity ops into the lazy nodes
        for (int i=work.size()-1;i>=0;--i)
        {
            work[i]->makeIdentity(boost::dynamic_pointer_cast<DataReady>(dep[i]->getPtr()));
        }
    }
    else  // functionspaces do not match
    {
        for (int i=0;i<work.size();++i)
        {
            work[i]->resolveToIdentity();
        }
    }
}
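// [Editor's note: hypothetical usage sketch, not part of the diff.  The real call site
//  (in Data or the Python layer) may look different; resolveGroupWorker is a member
//  function even though it behaves like a static helper, per the comment above.]
//
//   std::vector<DataLazy*> group;
//   group.push_back(lazyA);                    // e.g. the node for a+b
//   group.push_back(lazyB);                    // e.g. the node for sin(a)*c
//   lazyA->resolveGroupWorker(group);          // one parallel sweep fills both results
//   // Each node in 'group' is now an IDENTITY wrapping its own DataExpanded, provided
//   // the FunctionSpaces matched; otherwise they were resolved one by one.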

// This version of resolve uses storage in each node to hold results
DataReady_ptr
DataLazy::resolveNodeWorker()
{
  if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally
  {
    collapse();

# Line 1183 ... Line 1790

    return m_id;
  }
    // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
  DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
  ValueType& resvec=result->getVectorRW();
  DataReady_ptr resptr=DataReady_ptr(result);

  int sample;
  int totalsamples=getNumSamples();
  const ValueType* res=0;   // Storage for answer
LAZYDEBUG(cout << "Total number of samples=" << totalsamples << endl;)
  #pragma omp parallel private(sample,res)
  {
      size_t roffset=0;
#ifdef LAZY_STACK_PROF
      stackstart[omp_get_thread_num()]=&roffset;
      stackend[omp_get_thread_num()]=&roffset;
#endif
      #pragma omp for schedule(static)
      for (sample=0;sample<totalsamples;++sample)
      {
          roffset=0;
#ifdef _OPENMP
          res=resolveNodeSample(omp_get_thread_num(),sample,roffset);
#else
          res=resolveNodeSample(0,sample,roffset);
#endif
LAZYDEBUG(cout << "Sample #" << sample << endl;)
LAZYDEBUG(cout << "Final res[" << roffset << "]=" << (*res)[roffset] << endl;)
          DataVector::size_type outoffset=result->getPointOffset(sample,0);
          memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(DataVector::ElementType));
      }
  }
#ifdef LAZY_STACK_PROF
  for (int i=0;i<getNumberOfThreads();++i)
  {
      size_t r=((size_t)stackstart[i] - (size_t)stackend[i]);
//    cout << i << " " << stackstart[i] << " .. " << stackend[i] << " = " << r << endl;
      if (r>maxstackuse)
      {
          maxstackuse=r;
      }
  }
  cout << "Max resolve Stack use=" << maxstackuse << endl;
#endif
  return resptr;
}
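// [Editor's note: illustrative note on the OpenMP structure above, not part of the diff.]
// The parallel region is split into "#pragma omp parallel" plus an inner "#pragma omp for"
// (rather than a single combined "parallel for") so each thread can set up its private
// roffset - and, when LAZY_STACK_PROF is on, its stackstart/stackend markers - exactly
// once before the loop iterations are shared out.  The general shape is:
//
//   #pragma omp parallel private(sample,res)
//   {
//       size_t roffset=0;                 // per-thread setup, runs once per thread
//       #pragma omp for schedule(static)
//       for (sample=0; sample<totalsamples; ++sample)
//       {
//           // per-sample work, distributed across the thread team
//       }
//   }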

# Line 1224 ... Line 1839

std::string
DataLazy::toString() const
{
  ostringstream oss;
  oss << "Lazy Data: [depth=" << m_height << "] ";
  switch (escriptParams.getLAZY_STR_FMT())
  {
  case 1:   // tree format
    oss << endl;
    intoTreeString(oss,"");
    break;
  case 2:   // just the depth
    break;
  default:
    intoString(oss);
    break;
  }
  return oss.str();
}

# Line 1233 ... Line 1859

void
DataLazy::intoString(ostringstream& oss) const
{
//    oss << "[" << m_children << ";" << m_height << "]";
  switch (getOpgroup(m_op))
  {
  case G_IDENTITY:

# Line 1265 ... Line 1892

  case G_UNARY_P:
  case G_NP1OUT:
  case G_NP1OUT_P:
  case G_REDUCTION:
    oss << opToString(m_op) << '(';
    m_left->intoString(oss);
    oss << ')';

# Line 1276 ... Line 1904

    m_right->intoString(oss);
    oss << ')';
    break;
  case G_NP1OUT_2P:
    oss << opToString(m_op) << '(';
    m_left->intoString(oss);
    oss << ", " << m_axis_offset << ", " << m_transpose;
    oss << ')';
    break;
  case G_CONDEVAL:
    oss << opToString(m_op) << '(';
    m_mask->intoString(oss);
    oss << " ? ";
    m_left->intoString(oss);
    oss << " : ";
    m_right->intoString(oss);
    oss << ')';
    break;
  default:
    oss << "UNKNOWN";
  }
}

void
DataLazy::intoTreeString(ostringstream& oss, string indent) const
{
  oss << '[' << m_rank << ':' << setw(3) << m_samplesize << "] " << indent;
  switch (getOpgroup(m_op))
  {
  case G_IDENTITY:
    if (m_id->isExpanded())
    {
       oss << "E";
    }
    else if (m_id->isTagged())
    {
      oss << "T";
    }
    else if (m_id->isConstant())
    {
      oss << "C";
    }
    else
    {
      oss << "?";
    }
    oss << '@' << m_id.get() << endl;
    break;
  case G_BINARY:
    oss << opToString(m_op) << endl;
    indent+='.';
    m_left->intoTreeString(oss, indent);
    m_right->intoTreeString(oss, indent);
    break;
  case G_UNARY:
  case G_UNARY_P:
  case G_NP1OUT:
  case G_NP1OUT_P:
  case G_REDUCTION:
    oss << opToString(m_op) << endl;
    indent+='.';
    m_left->intoTreeString(oss, indent);
    break;
  case G_TENSORPROD:
    oss << opToString(m_op) << endl;
    indent+='.';
    m_left->intoTreeString(oss, indent);
    m_right->intoTreeString(oss, indent);
    break;
  case G_NP1OUT_2P:
    oss << opToString(m_op) << ", " << m_axis_offset << ", " << m_transpose << endl;
    indent+='.';
    m_left->intoTreeString(oss, indent);
    break;
  default:
    oss << "UNKNOWN";
  }
}
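// [Editor's note: illustrative only.  With the LAZY_STR_FMT parameter set to 1 (tree
//  format), toString() on a node built from something like sin(a)+b, with a and b
//  expanded scalars, would print along these lines; the operator symbols, ranks,
//  sample sizes and addresses below are invented for the example.]
//
//   Lazy Data: [depth=2]
//   [0:  4] +
//   [0:  4] .sin
//   [0:  4] ..E@0x7f3a10
//   [0:  4] .E@0x7f3a58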

DataAbstract*
DataLazy::deepCopy()
{
  switch (getOpgroup(m_op))
  {
  case G_IDENTITY:   return new DataLazy(m_id->deepCopy()->getPtr());
  case G_UNARY:
  case G_REDUCTION:  return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
  case G_UNARY_P:    return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_tol);
  case G_BINARY:     return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
  case G_NP1OUT:     return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
  case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
  case G_NP1OUT_P:   return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset);
  case G_NP1OUT_2P:  return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
  default:
    throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
  }
}

// There is no single, natural interpretation of getLength on DataLazy.
// Instances of DataReady can look at the size of their vectors.
// For lazy though, it could be the size the data would be if it were resolved;

# Line 1372 ... Line 2078

  throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
}

// I have decided to let Data:: handle this issue.
void
DataLazy::setToZero()
{
//   DataTypes::ValueType v(getNoValues(),0);
//   m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
//   m_op=IDENTITY;
//   m_right.reset();
//   m_left.reset();
//   m_readytype='C';
//   m_buffsRequired=1;

    privdebug=privdebug;  // to stop the compiler complaining about unused privdebug
    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");
}

bool
DataLazy::actsExpanded() const
{
    return (m_readytype=='E');
}

}   // end namespace
