/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


trunk/escript/src/DataLazy.cpp revision 2737 by jfenwick, Tue Nov 3 00:44:00 2009 UTC vs. trunk/escriptcore/src/DataLazy.cpp revision 4657 by jfenwick, Thu Feb 6 06:12:20 2014 UTC
# Line 1  Line 1 
1    
2  /*******************************************************  /*****************************************************************************
3  *  *
4  * Copyright (c) 2003-2009 by University of Queensland  * Copyright (c) 2003-2014 by University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * http://www.uq.edu.au
 * http://www.uq.edu.au/esscc  
6  *  *
7  * Primary Business: Queensland, Australia  * Primary Business: Queensland, Australia
8  * Licensed under the Open Software License version 3.0  * Licensed under the Open Software License version 3.0
9  * http://www.opensource.org/licenses/osl-3.0.php  * http://www.opensource.org/licenses/osl-3.0.php
10  *  *
11  *******************************************************/  * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12    * Development 2012-2013 by School of Earth Sciences
13    * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14    *
15    *****************************************************************************/
16    
17    
18  #include "DataLazy.h"  #include "DataLazy.h"
19  #ifdef USE_NETCDF  #include "esysUtils/Esys_MPI.h"
 #include <netcdfcpp.h>  
 #endif  
 #ifdef PASO_MPI  
 #include <mpi.h>  
 #endif  
20  #ifdef _OPENMP  #ifdef _OPENMP
21  #include <omp.h>  #include <omp.h>
22  #endif  #endif
# Line 30  Line 28 
28    
29  #include "EscriptParams.h"  #include "EscriptParams.h"
30    
31    #ifdef USE_NETCDF
32    #include <netcdfcpp.h>
33    #endif
34    
35  #include <iomanip>      // for some fancy formatting in debug  #include <iomanip>      // for some fancy formatting in debug
36    
37  // #define LAZYDEBUG(X) if (privdebug){X;}  // #define LAZYDEBUG(X) if (privdebug){X;}
# Line 44  bool privdebug=false; Line 46  bool privdebug=false;
46    
47  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "\n!!!!!!! SIZE LIMIT EXCEEDED " << m_children << ";" << m_height << endl << toString() << endl;resolveToIdentity();}
48    
49  #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {resolveToIdentity();}  // #define SIZELIMIT if ((m_height>escript::escriptParams.getTOO_MANY_LEVELS()) || (m_children>escript::escriptParams.getTOO_MANY_NODES())) {cerr << "SIZE LIMIT EXCEEDED " << m_height << endl;resolveToIdentity();}
50    
51    
52    #define SIZELIMIT if (m_height>escript::escriptParams.getTOO_MANY_LEVELS())  {if (escript::escriptParams.getLAZY_VERBOSE()){cerr << "SIZE LIMIT EXCEEDED height=" << m_height << endl;}resolveToIdentity();}
53    
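
The SIZELIMIT macro above is the safety valve that keeps lazy expressions from growing without bound: every constructor runs it, and once the node height exceeds escriptParams.getTOO_MANY_LEVELS() the whole subtree is evaluated eagerly via resolveToIdentity(). A minimal standalone sketch of that idea (toy class and a hard-coded cap, not the escript API):

#include <iostream>

struct ToyLazy {
    int height;                              // depth of this expression node
    static const int TOO_MANY_LEVELS = 70;   // hypothetical cap; escript reads it from EscriptParams
    explicit ToyLazy(int h) : height(h) {
        if (height > TOO_MANY_LEVELS) {
            resolveToIdentity();             // stand-in for collapsing the tree into a ready node
        }
    }
    void resolveToIdentity() {
        std::cout << "height " << height << " exceeded cap, evaluating eagerly\n";
        height = 0;                          // an identity node starts a fresh, shallow tree
    }
};

int main() {
    ToyLazy a(69);                           // under the cap: stays lazy
    ToyLazy b(71);                           // over the cap: forced to evaluate
    std::cout << a.height << " " << b.height << "\n";   // 69 0
    return 0;
}
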
54  /*  /*
55  How does DataLazy work?  How does DataLazy work?
56  ~~~~~~~~~~~~~~~~~~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~
# Line 58  A special operation, IDENTITY, stores an Line 62  A special operation, IDENTITY, stores an
62  This means that all "internal" nodes in the structure are instances of DataLazy.  This means that all "internal" nodes in the structure are instances of DataLazy.
63    
64  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...  Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
65  Note that IDENITY is not considered a unary operation.  Note that IDENTITY is not considered a unary operation.
66    
67  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).  I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
68  It must however form a DAG (directed acyclic graph).  It must however form a DAG (directed acyclic graph).
# Line 67  I will refer to individual DataLazy obje Line 71  I will refer to individual DataLazy obje
71  Each node also stores:  Each node also stores:
72  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
73      evaluated.      evaluated.
74  - m_buffsrequired ~ the larged number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
75      evaluate the expression.      evaluate the expression.
76  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
77    
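
The point above about c=a+a deserves a concrete illustration: because operands are held by shared pointers, the same child node can appear under two edges, which is why the structure is a DAG rather than a tree. A small standalone sketch (hypothetical Node type, not the real DataLazy interface):

#include <iostream>
#include <memory>
#include <string>

struct Node;
typedef std::shared_ptr<Node> Node_ptr;

struct Node {
    std::string op;
    Node_ptr left, right;
    Node(const std::string& o, Node_ptr l = Node_ptr(), Node_ptr r = Node_ptr())
        : op(o), left(l), right(r) {}
};

int main() {
    Node_ptr a(new Node("IDENTITY"));     // leaf wrapping some ready data
    Node_ptr c(new Node("add", a, a));    // c = a + a: both edges point at the same node
    std::cout << std::boolalpha << (c->left == c->right) << "\n";   // true: a DAG, not a tree
    return 0;
}
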
# Line 109  namespace escript Line 113  namespace escript
113  namespace  namespace
114  {  {
115    
116    
117    // enabling this will print out when ever the maximum stacksize used by resolve increases
118    // it assumes _OPENMP is also in use
119    //#define LAZY_STACK_PROF
120    
121    
122    
123    #ifndef _OPENMP
124      #ifdef LAZY_STACK_PROF
125      #undef LAZY_STACK_PROF
126      #endif
127    #endif
128    
129    
130    #ifdef LAZY_STACK_PROF
131    std::vector<void*> stackstart(getNumberOfThreads());
132    std::vector<void*> stackend(getNumberOfThreads());
133    size_t maxstackuse=0;
134    #endif
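
When LAZY_STACK_PROF is enabled, stackstart/stackend hold one stack address per OpenMP thread, and the difference between them approximates how deeply resolution recursed. A standalone sketch of the same address-probe technique (toy names, single thread, not the escript code):

#include <cstddef>
#include <iostream>

static char* stack_top = 0;

std::size_t probe(int depth) {
    char marker;                                   // a local: its address moves with the stack
    if (stack_top == 0) stack_top = &marker;       // remember where the call chain started
    if (depth == 0)
        return (std::size_t)stack_top > (std::size_t)&marker
                   ? (std::size_t)stack_top - (std::size_t)&marker
                   : (std::size_t)&marker - (std::size_t)stack_top;
    return probe(depth - 1);                       // recurse, consuming more stack
}

int main() {
    std::cout << "approx stack used by 100 frames: " << probe(100) << " bytes\n";
    return 0;
}
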
135    
136  enum ES_opgroup  enum ES_opgroup
137  {  {
138     G_UNKNOWN,     G_UNKNOWN,
# Line 120  enum ES_opgroup Line 144  enum ES_opgroup
144     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter     G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter
145     G_TENSORPROD,    // general tensor product     G_TENSORPROD,    // general tensor product
146     G_NP1OUT_2P,     // non-pointwise op with one output requiring two params     G_NP1OUT_2P,     // non-pointwise op with one output requiring two params
147     G_REDUCTION      // non-pointwise unary op with a scalar output     G_REDUCTION,     // non-pointwise unary op with a scalar output
148       G_CONDEVAL
149  };  };
150    
151    
# Line 136  string ES_opstrings[]={"UNKNOWN","IDENTI Line 161  string ES_opstrings[]={"UNKNOWN","IDENTI
161              "prod",              "prod",
162              "transpose", "trace",              "transpose", "trace",
163              "swapaxes",              "swapaxes",
164              "minval", "maxval"};              "minval", "maxval",
165  int ES_opcount=43;              "condEval"};
166    int ES_opcount=44;
167  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
168              G_UNARY,G_UNARY,G_UNARY, //10              G_UNARY,G_UNARY,G_UNARY, //10
169              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17
# Line 148  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENT Line 174  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENT
174              G_TENSORPROD,              G_TENSORPROD,
175              G_NP1OUT_P, G_NP1OUT_P,              G_NP1OUT_P, G_NP1OUT_P,
176              G_NP1OUT_2P,              G_NP1OUT_2P,
177              G_REDUCTION, G_REDUCTION};              G_REDUCTION, G_REDUCTION,
178                G_CONDEVAL};
179  inline  inline
180  ES_opgroup  ES_opgroup
181  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 169  resultFS(DataAbstract_ptr left, DataAbst Line 196  resultFS(DataAbstract_ptr left, DataAbst
196    FunctionSpace r=right->getFunctionSpace();    FunctionSpace r=right->getFunctionSpace();
197    if (l!=r)    if (l!=r)
198    {    {
199      if (r.probeInterpolation(l))      signed char res=r.getDomain()->preferredInterpolationOnDomain(r.getTypeCode(), l.getTypeCode());
200        if (res==1)
201      {      {
202      return l;      return l;
203      }      }
204      if (l.probeInterpolation(r))      if (res==-1)
205      {      {
206      return r;      return r;
207      }      }
# Line 221  resultShape(DataAbstract_ptr left, ES_op Line 249  resultShape(DataAbstract_ptr left, ES_op
249          int rank=left->getRank();          int rank=left->getRank();
250          if (axis_offset<0 || axis_offset>rank)          if (axis_offset<0 || axis_offset>rank)
251          {          {
252                 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);              stringstream e;
253              }              e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
254              for (int i=0; i<rank; i++)              throw DataException(e.str());
255            }
256            for (int i=0; i<rank; i++)
257          {          {
258             int index = (axis_offset+i)%rank;             int index = (axis_offset+i)%rank;
259                 sh.push_back(s[index]); // Append to new shape             sh.push_back(s[index]); // Append to new shape
260              }          }
261          return sh;          return sh;
262         }         }
263      break;      break;
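
The transpose case above builds the output shape by rotating the input axes by axis_offset. A tiny standalone check of that index rule (plain vectors, not escript types): shape {2,3,4} with axis_offset=1 becomes {3,4,2}.

#include <iostream>
#include <vector>

int main() {
    std::vector<int> s = {2, 3, 4};                  // input shape
    int axis_offset = 1, rank = (int)s.size();
    std::vector<int> sh;
    for (int i = 0; i < rank; i++)
        sh.push_back(s[(axis_offset + i) % rank]);   // same index rule as in resultShape
    for (int i = 0; i < (int)sh.size(); ++i)
        std::cout << sh[i] << " ";                   // prints: 3 4 2
    std::cout << "\n";
    return 0;
}
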
# Line 306  SwapShape(DataAbstract_ptr left, const i Line 336  SwapShape(DataAbstract_ptr left, const i
336          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");          throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
337       }       }
338       if (axis0<0 || axis0>rank-1) {       if (axis0<0 || axis0>rank-1) {
339          throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);          stringstream e;
340            e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
341            throw DataException(e.str());
342       }       }
343       if (axis1<0 || axis1>rank-1) {       if (axis1<0 || axis1>rank-1) {
344           throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);          stringstream e;
345            e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
346            throw DataException(e.str());
347       }       }
348       if (axis0 == axis1) {       if (axis0 == axis1) {
349           throw DataException("Error - Data::swapaxes: axis indices must be different.");           throw DataException("Error - Data::swapaxes: axis indices must be different.");
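
The change from "..." + rank-1 to a stringstream in both the transpose and swapaxes messages fixes a real bug: adding an integer to a string literal offsets the char pointer instead of appending the number. A short standalone demonstration:

#include <iostream>
#include <sstream>

int main() {
    int rank = 3;
    // Old form: the int is added to the char pointer, silently skipping characters.
    const char* broken = "axis0 must be between 0 and rank-1=" + (rank - 1);
    // New form: the number is actually formatted into the message.
    std::stringstream fixed;
    fixed << "axis0 must be between 0 and rank-1=" << (rank - 1);
    std::cout << broken << "\n";       // "is0 must be between 0 and rank-1="
    std::cout << fixed.str() << "\n";  // "axis0 must be between 0 and rank-1=2"
    return 0;
}
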
# Line 398  GTPShape(DataAbstract_ptr left, DataAbst Line 432  GTPShape(DataAbstract_ptr left, DataAbst
432    return shape2;    return shape2;
433  }  }
434    
 // determine the number of samples requires to evaluate an expression combining left and right  
 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  
 // The same goes for G_TENSORPROD  
 // It might seem that pointwise binary ops (G_BINARY) could be written over the top of the lefts.  
 // This would be true were it not for the possibility that the LHS could be a scalar which needs to be examined  
 // multiple times  
 int  
 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  
 {  
    switch(getOpgroup(op))  
    {  
    case G_IDENTITY: return 1;  
    case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_REDUCTION:  
    case G_UNARY:  
    case G_UNARY_P: return max(left->getBuffsRequired(),1);  
    case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);  
    case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);  
    case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);  
    case G_NP1OUT_2P: return 1+max(left->getBuffsRequired(),1);  
    default:  
     throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");  
    }  
 }  
   
   
435  }   // end anonymous namespace  }   // end anonymous namespace
436    
437    
# Line 439  opToString(ES_optype op) Line 447  opToString(ES_optype op)
447    return ES_opstrings[op];    return ES_opstrings[op];
448  }  }
449    
 #ifdef LAZY_NODE_STORAGE  
450  void DataLazy::LazyNodeSetup()  void DataLazy::LazyNodeSetup()
451  {  {
452  #ifdef _OPENMP  #ifdef _OPENMP
# Line 456  void DataLazy::LazyNodeSetup() Line 463  void DataLazy::LazyNodeSetup()
463      m_sampleids[0]=-1;      m_sampleids[0]=-1;
464  #endif  // _OPENMP  #endif  // _OPENMP
465  }  }
 #endif   // LAZY_NODE_STORAGE  
466    
467    
468  // Creates an identity node  // Creates an identity node
469  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
470      : parent(p->getFunctionSpace(),p->getShape())      : parent(p->getFunctionSpace(),p->getShape())
 #ifdef LAZY_NODE_STORAGE  
471      ,m_sampleids(0),      ,m_sampleids(0),
472      m_samples(1)      m_samples(1)
 #endif  
473  {  {
474     if (p->isLazy())     if (p->isLazy())
475     {     {
# Line 507  DataLazy::DataLazy(DataAbstract_ptr left Line 511  DataLazy::DataLazy(DataAbstract_ptr left
511     }     }
512     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
513     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
514     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
515     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
516     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
517     LazyNodeSetup();     LazyNodeSetup();
 #endif  
518     SIZELIMIT     SIZELIMIT
519  }  }
520    
# Line 581  LAZYDEBUG(cout << "Right " << right.get( Line 581  LAZYDEBUG(cout << "Right " << right.get(
581      m_readytype='C';      m_readytype='C';
582     }     }
583     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());    
    m_buffsRequired=calcBuffs(m_left, m_right,m_op);  
584     m_children=m_left->m_children+m_right->m_children+2;     m_children=m_left->m_children+m_right->m_children+2;
585     m_height=max(m_left->m_height,m_right->m_height)+1;     m_height=max(m_left->m_height,m_right->m_height)+1;
 #ifdef LAZY_NODE_STORAGE  
586     LazyNodeSetup();     LazyNodeSetup();
 #endif  
587     SIZELIMIT     SIZELIMIT
588  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
589  }  }
# Line 651  DataLazy::DataLazy(DataAbstract_ptr left Line 647  DataLazy::DataLazy(DataAbstract_ptr left
647      m_readytype='C';      m_readytype='C';
648     }     }
649     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());    
    m_buffsRequired=calcBuffs(m_left, m_right,m_op);  
650     m_children=m_left->m_children+m_right->m_children+2;     m_children=m_left->m_children+m_right->m_children+2;
651     m_height=max(m_left->m_height,m_right->m_height)+1;     m_height=max(m_left->m_height,m_right->m_height)+1;
 #ifdef LAZY_NODE_STORAGE  
652     LazyNodeSetup();     LazyNodeSetup();
 #endif  
653     SIZELIMIT     SIZELIMIT
654  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
655  }  }
# Line 685  DataLazy::DataLazy(DataAbstract_ptr left Line 677  DataLazy::DataLazy(DataAbstract_ptr left
677     }     }
678     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
679     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
680     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
681     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
682     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
683     LazyNodeSetup();     LazyNodeSetup();
 #endif  
684     SIZELIMIT     SIZELIMIT
685  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
686  }  }
# Line 719  DataLazy::DataLazy(DataAbstract_ptr left Line 707  DataLazy::DataLazy(DataAbstract_ptr left
707     }     }
708     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
709     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
710     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
711     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
712     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
713     LazyNodeSetup();     LazyNodeSetup();
 #endif  
714     SIZELIMIT     SIZELIMIT
715  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
716  }  }
# Line 754  DataLazy::DataLazy(DataAbstract_ptr left Line 738  DataLazy::DataLazy(DataAbstract_ptr left
738     }     }
739     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
740     m_left=lleft;     m_left=lleft;
    m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point  
741     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());  
742     m_children=m_left->m_children+1;     m_children=m_left->m_children+1;
743     m_height=m_left->m_height+1;     m_height=m_left->m_height+1;
 #ifdef LAZY_NODE_STORAGE  
744     LazyNodeSetup();     LazyNodeSetup();
 #endif  
745     SIZELIMIT     SIZELIMIT
746  LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)  LAZYDEBUG(cout << "(7)Lazy created with " << m_samplesize << endl;)
747  }  }
748    
749  DataLazy::~DataLazy()  
750    namespace
751  {  {
 #ifdef LAZY_NODE_SETUP  
    delete[] m_sampleids;  
    delete[] m_samples;  
 #endif  
 }  
752    
753        inline int max3(int a, int b, int c)
754        {
755        int t=(a>b?a:b);
756        return (t>c?t:c);
757    
758  int      }
 DataLazy::getBuffsRequired() const  
 {  
     return m_buffsRequired;  
759  }  }
760    
761    DataLazy::DataLazy(DataAbstract_ptr mask, DataAbstract_ptr left, DataAbstract_ptr right/*, double tol*/)
762  size_t      : parent(left->getFunctionSpace(), left->getShape()),
763  DataLazy::getMaxSampleSize() const      m_op(CONDEVAL),
764        m_axis_offset(0),
765        m_transpose(0),
766        m_tol(0)
767  {  {
768      return m_maxsamplesize;  
769       DataLazy_ptr lmask;
770       DataLazy_ptr lleft;
771       DataLazy_ptr lright;
772       if (!mask->isLazy())
773       {
774        lmask=DataLazy_ptr(new DataLazy(mask));
775       }
776       else
777       {
778        lmask=dynamic_pointer_cast<DataLazy>(mask);
779       }
780       if (!left->isLazy())
781       {
782        lleft=DataLazy_ptr(new DataLazy(left));
783       }
784       else
785       {
786        lleft=dynamic_pointer_cast<DataLazy>(left);
787       }
788       if (!right->isLazy())
789       {
790        lright=DataLazy_ptr(new DataLazy(right));
791       }
792       else
793       {
794        lright=dynamic_pointer_cast<DataLazy>(right);
795       }
796       m_readytype=lmask->m_readytype;
797       if ((lleft->m_readytype!=lright->m_readytype) || (lmask->m_readytype!=lleft->m_readytype))
798       {
799        throw DataException("Programmer Error - condEval arguments must have the same readytype");
800       }
801       m_left=lleft;
802       m_right=lright;
803       m_mask=lmask;
804       m_samplesize=getNumDPPSample()*getNoValues();
805       m_children=m_left->m_children+m_right->m_children+m_mask->m_children+1;
806       m_height=max3(m_left->m_height,m_right->m_height,m_mask->m_height)+1;
807       LazyNodeSetup();
808       SIZELIMIT
809    LAZYDEBUG(cout << "(8)Lazy created with " << m_samplesize << endl;)
810  }  }
811    
812    
813    
814  size_t  DataLazy::~DataLazy()
 DataLazy::getSampleBufferSize() const  
815  {  {
816      return m_maxsamplesize*(max(1,m_buffsRequired));     delete[] m_sampleids;
817  }  }
818    
819    
820  /*  /*
821    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
822    This does the work for the collapse method.    This does the work for the collapse method.
823    For reasons of efficiency do not call this method on DataExpanded nodes.    For reasons of efficiency do not call this method on DataExpanded nodes.
824  */  */
825  DataReady_ptr  DataReady_ptr
826  DataLazy::collapseToReady()  DataLazy::collapseToReady() const
827  {  {
828    if (m_readytype=='E')    if (m_readytype=='E')
829    { // this is more an efficiency concern than anything else    { // this is more an efficiency concern than anything else
# Line 957  DataLazy::collapseToReady() Line 978  DataLazy::collapseToReady()
978     the purpose of using DataLazy in the first place).     the purpose of using DataLazy in the first place).
979  */  */
980  void  void
981  DataLazy::collapse()  DataLazy::collapse() const
982  {  {
983    if (m_op==IDENTITY)    if (m_op==IDENTITY)
984    {    {
# Line 971  DataLazy::collapse() Line 992  DataLazy::collapse()
992    m_op=IDENTITY;    m_op=IDENTITY;
993  }  }
994    
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");  
   }  
   const ValueType* vleft=m_left->resolveVectorSample(v,offset,sampleNo,roffset);  
   const double* left=&((*vleft)[roffset]);  
   double* result=&(v[offset]);  
   roffset=offset;  
   switch (m_op)  
   {  
     case SIN:    
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);  
     break;  
     case COS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);  
     break;  
     case TAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);  
     break;  
     case ASIN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);  
     break;  
     case ACOS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);  
     break;  
     case ATAN:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);  
     break;  
     case SINH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);  
     break;  
     case COSH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);  
     break;  
     case TANH:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);  
     break;  
     case ERF:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::erf);  
     break;  
 #endif  
    case ASINH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 #endif    
     break;  
    case ACOSH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 #endif    
     break;  
    case ATANH:  
 #if defined (_WIN32) && !defined(__INTEL_COMPILER)  
     tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 #else  
     tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 #endif    
     break;  
     case LOG10:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);  
     break;  
     case LOG:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);  
     break;  
     case SIGN:  
     tensor_unary_operation(m_samplesize, left, result, escript::fsign);  
     break;  
     case ABS:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);  
     break;  
     case NEG:  
     tensor_unary_operation(m_samplesize, left, result, negate<double>());  
     break;  
     case POS:  
     // it doesn't mean anything for delayed.  
     // it will just trigger a deep copy of the lazy object  
     throw DataException("Programmer error - POS not supported for lazy data.");  
     break;  
     case EXP:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);  
     break;  
     case SQRT:  
     tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);  
     break;  
     case RECIP:  
     tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));  
     break;  
     case GZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));  
     break;  
     case LZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));  
     break;  
     case GEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));  
     break;  
     case LEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));  
     break;  
 // There are actually G_UNARY_P but I don't see a compelling reason to treat them differently  
     case NEZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));  
     break;  
     case EZ:  
     tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));  
     break;  
   
     default:  
     throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
995    
 /*  
   \brief Compute the value of the expression (reduction operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveReduction(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");  
   }  
   const ValueType* vleft=m_left->resolveVectorSample(v,offset,sampleNo,roffset);  
   double* result=&(v[offset]);  
   roffset=offset;  
   unsigned int ndpps=getNumDPPSample();  
   unsigned int psize=DataTypes::noValues(getShape());  
   switch (m_op)  
   {  
     case MINVAL:  
     {  
       for (unsigned int z=0;z<ndpps;++z)  
       {  
          FMin op;  
          *result=DataMaths::reductionOp(*vleft, m_left->getShape(), roffset, op, numeric_limits<double>::max());  
          roffset+=psize;  
          result++;  
       }  
     }  
     break;  
     case MAXVAL:  
     {  
       for (unsigned int z=0;z<ndpps;++z)  
       {  
          FMax op;  
          *result=DataMaths::reductionOp(*vleft, m_left->getShape(), roffset, op, numeric_limits<double>::max()*-1);  
          roffset+=psize;  
          result++;  
       }  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveReduction can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
996    
997    
998    
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset=roffset+m_samplesize;  
 LAZYDEBUG(cerr << "subroffset=" << subroffset << endl;)  
   const ValueType* vleft=m_left->resolveVectorSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t step=getNoValues();  
   switch (m_op)  
   {  
     case SYM:  
     for (loop=0;loop<numsteps;++loop)  
     {  
         DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);  
         subroffset+=step;  
         offset+=step;  
     }  
     break;  
     case NSYM:  
     for (loop=0;loop<numsteps;++loop)  
     {  
         DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);  
         subroffset+=step;  
         offset+=step;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
 /*  
   \brief Compute the value of the expression (unary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset;  
   const ValueType* vleft=m_left->resolveVectorSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
 LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)  
 LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t outstep=getNoValues();  
   size_t instep=m_left->getNoValues();  
 LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)  
   switch (m_op)  
   {  
     case TRACE:  
     for (loop=0;loop<numsteps;++loop)  
     {  
 size_t zz=sampleNo*getNumDPPSample()+loop;  
 if (zz==5800)  
 {  
 LAZYDEBUG(cerr << "point=" <<  zz<< endl;)  
 LAZYDEBUG(cerr << "Input to  trace=" << DataTypes::pointToString(*vleft,m_left->getShape(),subroffset,"") << endl;)  
 LAZYDEBUG(cerr << "Offset for point=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << " vs ";)  
 LAZYDEBUG(cerr << subroffset << endl;)  
 LAZYDEBUG(cerr << "output=" << offset << endl;)  
 }  
             DataMaths::trace(*vleft,m_left->getShape(),subroffset, v ,getShape(),offset,m_axis_offset);  
 if (zz==5800)  
 {  
 LAZYDEBUG(cerr << "Result of trace=" << DataTypes::pointToString(v,getShape(),offset,"") << endl;)  
 }  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     case TRANS:  
     for (loop=0;loop<numsteps;++loop)  
     {  
             DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
 /*  
   \brief Compute the value of the expression (unary operation with int params) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 DataTypes::ValueType*  
 DataLazy::resolveNP1OUT_2P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  
 {  
     // we assume that any collapsing has been done before we get here  
     // since we only have one argument we don't need to think about only  
     // processing single points.  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer error - resolveNP1OUT_2P should only be called on expanded Data.");  
   }  
     // since we can't write the result over the input, we need a result offset further along  
   size_t subroffset;  
   const ValueType* vleft=m_left->resolveVectorSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);  
 LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)  
 LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)  
   roffset=offset;  
   size_t loop=0;  
   size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;  
   size_t outstep=getNoValues();  
   size_t instep=m_left->getNoValues();  
 LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)  
   switch (m_op)  
   {  
     case SWAP:  
     for (loop=0;loop<numsteps;++loop)  
     {  
             DataMaths::swapaxes(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset, m_transpose);  
         subroffset+=instep;  
         offset+=outstep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveNP1OUT2P can not resolve operator "+opToString(m_op)+".");  
   }  
   return &v;  
 }  
   
   
999    
1000  #define PROC_OP(TYPE,X)                               \  #define PROC_OP(TYPE,X)                               \
1001      for (int j=0;j<onumsteps;++j)\      for (int j=0;j<onumsteps;++j)\
# Line 1361  LAZYDEBUG(cout << " result=      " << re Line 1013  LAZYDEBUG(cout << " result=      " << re
1013        rroffset+=orightstep;\        rroffset+=orightstep;\
1014      }      }
1015    
 /*  
   \brief Compute the value of the expression (binary operation) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  
 // have already been collapsed to IDENTITY. So we must have at least one expanded child.  
 // If both children are expanded, then we can process them in a single operation (we treat  
 // the whole sample as one big datapoint.  
 // If one of the children is not expanded, then we need to treat each point in the sample  
 // individually.  
 // There is an additional complication when scalar operations are considered.  
 // For example, 2+Vector.  
 // In this case each double within the point is treated individually  
 DataTypes::ValueType*  
 DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  
 {  
 LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  
   
   size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors  
     // first work out which of the children are expanded  
   bool leftExp=(m_left->m_readytype=='E');  
   bool rightExp=(m_right->m_readytype=='E');  
   if (!leftExp && !rightExp)  
   {  
     throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");  
   }  
   bool leftScalar=(m_left->getRank()==0);  
   bool rightScalar=(m_right->getRank()==0);  
   if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))  
   {  
     throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");  
   }  
   size_t leftsize=m_left->getNoValues();  
   size_t rightsize=m_right->getNoValues();  
   size_t chunksize=1;           // how many doubles will be processed in one go  
   int leftstep=0;       // how far should the left offset advance after each step  
   int rightstep=0;  
   int numsteps=0;       // total number of steps for the inner loop  
   int oleftstep=0;  // the o variables refer to the outer loop  
   int orightstep=0; // The outer loop is only required in cases where there is an extended scalar  
   int onumsteps=1;  
     
   bool LES=(leftExp && leftScalar); // Left is an expanded scalar  
   bool RES=(rightExp && rightScalar);  
   bool LS=(!leftExp && leftScalar); // left is a single scalar  
   bool RS=(!rightExp && rightScalar);  
   bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar  
   bool RN=(!rightExp && !rightScalar);  
   bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar  
   bool REN=(rightExp && !rightScalar);  
   
   if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars  
   {  
     chunksize=m_left->getNumDPPSample()*leftsize;  
     leftstep=0;  
     rightstep=0;  
     numsteps=1;  
   }  
   else if (LES || RES)  
   {  
     chunksize=1;  
     if (LES)        // left is an expanded scalar  
     {  
         if (RS)  
         {  
            leftstep=1;  
            rightstep=0;  
            numsteps=m_left->getNumDPPSample();  
         }  
         else        // RN or REN  
         {  
            leftstep=0;  
            oleftstep=1;  
            rightstep=1;  
            orightstep=(RN ? -(int)rightsize : 0);  
            numsteps=rightsize;  
            onumsteps=m_left->getNumDPPSample();  
         }  
     }  
     else        // right is an expanded scalar  
     {  
         if (LS)  
         {  
            rightstep=1;  
            leftstep=0;  
            numsteps=m_right->getNumDPPSample();  
         }  
         else  
         {  
            rightstep=0;  
            orightstep=1;  
            leftstep=1;  
            oleftstep=(LN ? -(int)leftsize : 0);  
            numsteps=leftsize;  
            onumsteps=m_right->getNumDPPSample();  
         }  
     }  
   }  
   else  // this leaves (LEN, RS), (LEN, RN) and their transposes  
   {  
     if (LEN)    // and Right will be a single value  
     {  
         chunksize=rightsize;  
         leftstep=rightsize;  
         rightstep=0;  
         numsteps=m_left->getNumDPPSample();  
         if (RS)  
         {  
            numsteps*=leftsize;  
         }  
     }  
     else    // REN  
     {  
         chunksize=leftsize;  
         rightstep=leftsize;  
         leftstep=0;  
         numsteps=m_right->getNumDPPSample();  
         if (LS)  
         {  
            numsteps*=rightsize;  
         }  
     }  
   }  
   
   int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0  
     // Get the values of sub-expressions  
   const ValueType* left=m_left->resolveVectorSample(v,offset+getMaxSampleSize(),sampleNo,lroffset); // see note on  
     // calcBufss for why we can't put offset as the 2nd param above  
   const ValueType* right=m_right->resolveVectorSample(v,offset+2*getMaxSampleSize(),sampleNo,rroffset); // Note  
     // the right child starts further along.  
 LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)  
 LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)  
 LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)  
 LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)  
 LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)  
 LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)  
 LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)  
   
   
   double* resultp=&(v[offset]);     // results are stored at the vector offset we recieved  
   switch(m_op)  
   {  
     case ADD:  
         PROC_OP(NO_ARG,plus<double>());  
     break;  
     case SUB:  
     PROC_OP(NO_ARG,minus<double>());  
     break;  
     case MUL:  
     PROC_OP(NO_ARG,multiplies<double>());  
     break;  
     case DIV:  
     PROC_OP(NO_ARG,divides<double>());  
     break;  
     case POW:  
        PROC_OP(double (double,double),::pow);  
     break;  
     default:  
     throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");  
   }  
   roffset=offset;  
   return &v;  
 }  
   
   
   
 /*  
   \brief Compute the value of the expression (tensor product) for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
   If the result is stored in v it should be stored at the offset given.  
   Everything from offset to the end of v should be considered available for this method to use.  
 */  
 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  
 // have already been collapsed to IDENTITY. So we must have at least one expanded child.  
 // unlike the other resolve helpers, we must treat these datapoints separately.  
 DataTypes::ValueType*  
 DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  
 {  
 LAZYDEBUG(cout << "Resolve TensorProduct: " << toString()  << " to offset " << offset<< endl;)  
   
   size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors  
     // first work out which of the children are expanded  
   bool leftExp=(m_left->m_readytype=='E');  
   bool rightExp=(m_right->m_readytype=='E');  
   int steps=getNumDPPSample();  
 /*  int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);  
   int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);*/  
   int leftStep=(leftExp? m_left->getNoValues() : 0);        // do not have scalars as input to this method  
   int rightStep=(rightExp?m_right->getNoValues() : 0);  
   
   int resultStep=getNoValues();  
     // Get the values of sub-expressions (leave a gap of one sample for the result).  
   int gap=offset+m_samplesize;    
   
 LAZYDEBUG(cout << "Query left with offset=" << gap << endl;)  
   
   const ValueType* left=m_left->resolveVectorSample(v,gap,sampleNo,lroffset);  
   gap+=m_left->getMaxSampleSize();  
   
   
 LAZYDEBUG(cout << "Query right with offset=" << gap << endl;)  
   
   
   const ValueType* right=m_right->resolveVectorSample(v,gap,sampleNo,rroffset);  
   
 LAZYDEBUG(cerr << "[Left shape]=" << DataTypes::shapeToString(m_left->getShape()) << "\n[Right shape]=" << DataTypes::shapeToString(m_right->getShape()) << " result=" <<DataTypes::shapeToString(getShape()) <<  endl;  
 cout << getNoValues() << endl;)  
 LAZYDEBUG(cerr << "Result of left=";)  
 LAZYDEBUG(cerr << "[" << lroffset << " .. " << lroffset+m_left->getNoValues() << "]" << endl;  
   
 for (int i=lroffset, limit=lroffset+(leftExp?m_left->getNoValues()*m_left->getNumDPPSample():m_left->getNoValues());i<limit;++i)  
 {  
 cout << "[" << setw(2) << i-lroffset << "] " << setw(10) << (*left)[i] << " ";  
 if (i%4==0) cout << endl;  
 })  
 LAZYDEBUG(cerr << "\nResult of right=" << endl;)  
 LAZYDEBUG(  
 for (int i=rroffset, limit=rroffset+(rightExp?m_right->getNoValues()*m_right->getNumDPPSample():m_right->getNoValues());i<limit;++i)  
 {  
 cerr << "[" <<  setw(2)<< i-rroffset << "] " << setw(10) << (*right)[i] << " ";  
 if (i%4==0) cout << endl;  
 }  
 cerr << endl;  
 )  
 LAZYDEBUG(cerr << "Post sub calls: " << toString() << endl;)  
 LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)  
 LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)  
 LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)  
 LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)  
 LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)  
   
   double* resultp=&(v[offset]);     // results are stored at the vector offset we recieved  
   switch(m_op)  
   {  
     case PROD:  
     for (int i=0;i<steps;++i,resultp+=resultStep)  
     {  
   
 LAZYDEBUG(cout << "lroffset=" << lroffset << "rroffset=" << rroffset << endl;)  
 LAZYDEBUG(cout << "l*=" << left << " r*=" << right << endl;)  
 LAZYDEBUG(cout << "m_SL=" << m_SL << " m_SM=" << m_SM << " m_SR=" << m_SR << endl;)  
   
           const double *ptr_0 = &((*left)[lroffset]);  
           const double *ptr_1 = &((*right)[rroffset]);  
   
 LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)  
 LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)  
   
           matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);  
   
 LAZYDEBUG(cout << "Results--\n";  
 {  
   DataVector dv(getNoValues());  
 for (int z=0;z<getNoValues();++z)  
 {  
   cout << "[" << setw(2) << z<< "] " << setw(10) << resultp[z] << " ";  
   if (z%4==0) cout << endl;  
   dv[z]=resultp[z];  
 }  
 cout << endl << DataTypes::pointToString(dv,getShape(),0,"RESLT");  
 cout << "\nWritten to: " << resultp << " resultStep=" << resultStep << endl;  
 }  
 )  
       lroffset+=leftStep;  
       rroffset+=rightStep;  
     }  
     break;  
     default:  
     throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");  
   }  
   roffset=offset;  
   return &v;  
 }  
   
   
 #ifdef LAZY_NODE_STORAGE  
1016    
1017  // The result will be stored in m_samples  // The result will be stored in m_samples
1018  // The return value is a pointer to the DataVector, offset is the offset within the return value  // The return value is a pointer to the DataVector, offset is the offset within the return value
1019  const DataTypes::ValueType*  const DataTypes::ValueType*
1020  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeSample(int tid, int sampleNo, size_t& roffset) const
1021  {  {
1022  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
1023      // collapse so we have a 'E' node or an IDENTITY for some other type      // collapse so we have a 'E' node or an IDENTITY for some other type
# Line 1667  LAZYDEBUG(cout << "Resolve sample " << t Line 1028  LAZYDEBUG(cout << "Resolve sample " << t
1028    if (m_op==IDENTITY)      if (m_op==IDENTITY)  
1029    {    {
1030      const ValueType& vec=m_id->getVectorRO();      const ValueType& vec=m_id->getVectorRO();
 //     if (m_readytype=='C')  
 //     {  
 //  roffset=0;      // all samples read from the same position  
 //  return &(m_samples);  
 //     }  
1031      roffset=m_id->getPointOffset(sampleNo, 0);      roffset=m_id->getPointOffset(sampleNo, 0);
1032    #ifdef LAZY_STACK_PROF
1033    int x;
1034    if (&x<stackend[omp_get_thread_num()])
1035    {
1036           stackend[omp_get_thread_num()]=&x;
1037    }
1038    #endif
1039      return &(vec);      return &(vec);
1040    }    }
1041    if (m_readytype!='E')    if (m_readytype!='E')
# Line 1685  LAZYDEBUG(cout << "Resolve sample " << t Line 1048  LAZYDEBUG(cout << "Resolve sample " << t
1048      return &(m_samples);        // sample is already resolved      return &(m_samples);        // sample is already resolved
1049    }    }
1050    m_sampleids[tid]=sampleNo;    m_sampleids[tid]=sampleNo;
1051    
1052    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1053    {    {
1054    case G_UNARY:    case G_UNARY:
# Line 1695  LAZYDEBUG(cout << "Resolve sample " << t Line 1059  LAZYDEBUG(cout << "Resolve sample " << t
1059    case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);    case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset);
1060    case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);    case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset);
1061    case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);    case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset);
1062      case G_CONDEVAL: return resolveNodeCondEval(tid, sampleNo, roffset);
1063    default:    default:
1064      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1065    }    }
1066  }  }
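
Two details of the node-storage scheme above are worth spelling out: each thread writes into its own slice of m_samples at offset m_samplesize*tid, and m_sampleids records which sample that slice currently holds, so a node shared by several parents (c=a+a again) is evaluated only once per sample per thread. A standalone toy version of that memoisation (hypothetical names, no OpenMP):

#include <iostream>
#include <vector>

struct ToyNode {
    int samplesize;
    std::vector<double> samples;     // one slice of samplesize doubles per thread
    std::vector<int> sampleids;      // which sample each thread's slice currently holds
    int evaluations;                 // counts real work, for demonstration only

    ToyNode(int ss, int nthreads)
        : samplesize(ss), samples(ss * nthreads), sampleids(nthreads, -1), evaluations(0) {}

    const double* resolveSample(int tid, int sampleNo) {
        int roffset = samplesize * tid;
        if (sampleids[tid] == sampleNo)
            return &samples[roffset];             // already resolved for this thread: reuse it
        sampleids[tid] = sampleNo;
        ++evaluations;                            // pretend this is the expensive evaluation
        for (int i = 0; i < samplesize; ++i)
            samples[roffset + i] = sampleNo;      // dummy "result"
        return &samples[roffset];
    }
};

int main() {
    ToyNode shared(4, 1);            // a node reachable twice, as in c = a + a
    shared.resolveSample(0, 7);      // first visit does the work
    shared.resolveSample(0, 7);      // second visit hits the cache
    std::cout << "evaluations: " << shared.evaluations << "\n";   // prints 1
    return 0;
}
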
1067    
1068  const DataTypes::ValueType*  const DataTypes::ValueType*
1069  DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeUnary(int tid, int sampleNo, size_t& roffset) const
1070  {  {
1071      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1072      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
# Line 1833  DataLazy::resolveNodeUnary(int tid, int Line 1198  DataLazy::resolveNodeUnary(int tid, int
1198    
1199    
1200  const DataTypes::ValueType*  const DataTypes::ValueType*
1201  DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset) const
1202  {  {
1203      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1204      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
# Line 1852  DataLazy::resolveNodeReduction(int tid, Line 1217  DataLazy::resolveNodeReduction(int tid,
1217    
1218    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1219    unsigned int ndpps=getNumDPPSample();    unsigned int ndpps=getNumDPPSample();
1220    unsigned int psize=DataTypes::noValues(getShape());    unsigned int psize=DataTypes::noValues(m_left->getShape());
1221    double* result=&(m_samples[roffset]);    double* result=&(m_samples[roffset]);
1222    switch (m_op)    switch (m_op)
1223    {    {
# Line 1885  DataLazy::resolveNodeReduction(int tid, Line 1250  DataLazy::resolveNodeReduction(int tid,
1250  }  }
1251    
1252  const DataTypes::ValueType*  const DataTypes::ValueType*
1253  DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) const
1254  {  {
1255      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1256      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
# Line 1930  DataLazy::resolveNodeNP1OUT(int tid, int Line 1295  DataLazy::resolveNodeNP1OUT(int tid, int
1295  }  }
1296    
1297  const DataTypes::ValueType*  const DataTypes::ValueType*
1298  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT_P(int tid, int sampleNo, size_t& roffset) const
1299  {  {
1300      // we assume that any collapsing has been done before we get here      // we assume that any collapsing has been done before we get here
1301      // since we only have one argument we don't need to think about only      // since we only have one argument we don't need to think about only
# Line 1978  DataLazy::resolveNodeNP1OUT_P(int tid, i Line 1343  DataLazy::resolveNodeNP1OUT_P(int tid, i
1343    
1344    
1345  const DataTypes::ValueType*  const DataTypes::ValueType*
1346  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeNP1OUT_2P(int tid, int sampleNo, size_t& roffset) const
1347  {  {
1348    if (m_readytype!='E')    if (m_readytype!='E')
1349    {    {
# Line 2013  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1378  DataLazy::resolveNodeNP1OUT_2P(int tid,
1378    return &m_samples;    return &m_samples;
1379  }  }
1380    
1381    const DataTypes::ValueType*
1382    DataLazy::resolveNodeCondEval(int tid, int sampleNo, size_t& roffset) const
1383    {
1384      if (m_readytype!='E')
1385      {
1386        throw DataException("Programmer error - resolveNodeCondEval should only be called on expanded Data.");
1387      }
1388      if (m_op!=CONDEVAL)
1389      {
1390        throw DataException("Programmer error - resolveNodeCondEval should only be called on CONDEVAL nodes.");
1391      }
1392      size_t subroffset;
1393    
1394      const ValueType* maskres=m_mask->resolveNodeSample(tid, sampleNo, subroffset);
1395      const ValueType* srcres=0;
1396      if ((*maskres)[subroffset]>0)
1397      {
1398        srcres=m_left->resolveNodeSample(tid, sampleNo, subroffset);
1399      }
1400      else
1401      {
1402        srcres=m_right->resolveNodeSample(tid, sampleNo, subroffset);
1403      }
1404    
1405      // Now we need to copy the result
1406    
1407      roffset=m_samplesize*tid;
1408      for (int i=0;i<m_samplesize;++i)
1409      {
1410        m_samples[roffset+i]=(*srcres)[subroffset+i];  
1411      }
1412    
1413      return &m_samples;
1414    }
1415    
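Note: the CONDEVAL node above picks, per sample, between its left and right operands by testing the first value of the resolved mask sample. The following is a minimal, self-contained sketch of that selection logic only; the names (condEvalSample, sampleSize) are illustrative, plain std::vector buffers stand in for the DataLazy machinery, and the mask is assumed to share the data layout, which is a simplification.

#include <cstddef>
#include <vector>

// Copy one sample from either 'whenTrue' or 'whenFalse' into 'out', depending on
// whether the first value of the corresponding mask sample is positive -- mirroring
// the (*maskres)[subroffset] > 0 test in resolveNodeCondEval above.
void condEvalSample(const std::vector<double>& mask,
                    const std::vector<double>& whenTrue,
                    const std::vector<double>& whenFalse,
                    std::vector<double>& out,
                    std::size_t sampleNo,
                    std::size_t sampleSize)
{
    const std::size_t offset = sampleNo * sampleSize;
    const std::vector<double>& src = (mask[offset] > 0) ? whenTrue : whenFalse;
    for (std::size_t i = 0; i < sampleSize; ++i)
    {
        out[offset + i] = src[offset + i];
    }
}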
1416  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data  // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1417  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
# Line 2025  DataLazy::resolveNodeNP1OUT_2P(int tid, Line 1423  DataLazy::resolveNodeNP1OUT_2P(int tid,
1423  // For example, 2+Vector.  // For example, 2+Vector.
1424  // In this case each double within the point is treated individually  // In this case each double within the point is treated individually
1425  const DataTypes::ValueType*  const DataTypes::ValueType*
1426  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeBinary(int tid, int sampleNo, size_t& roffset) const
1427  {  {
1428  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1429    
# Line 2152  LAZYDEBUG(cout << "Right res["<< rroffse Line 1550  LAZYDEBUG(cout << "Right res["<< rroffse
1550    
1551    
1552    roffset=m_samplesize*tid;    roffset=m_samplesize*tid;
1553    double* resultp=&(m_samples[roffset]);        // results are stored at the vector offset we recieved    double* resultp=&(m_samples[roffset]);        // results are stored at the vector offset we received
1554    switch(m_op)    switch(m_op)
1555    {    {
1556      case ADD:      case ADD:
# Line 2182  LAZYDEBUG(cout << "Result res[" << roffs Line 1580  LAZYDEBUG(cout << "Result res[" << roffs
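Note: the "2+Vector" case mentioned in the comment above resolveNodeBinary means one operand may be a single scalar that is applied to every double of the other operand. A rough, self-contained sketch of that per-double broadcasting pattern follows; it is an assumption about the general idea, not the escript implementation.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Apply a binary operation value-by-value, letting either side be a single scalar
// that is broadcast across the other operand (e.g. 2 + Vector).
void broadcastBinaryOp(const std::vector<double>& left,
                       const std::vector<double>& right,
                       std::vector<double>& out,
                       const std::function<double(double, double)>& op)
{
    const std::size_t n = std::max(left.size(), right.size());
    out.resize(n);
    for (std::size_t i = 0; i < n; ++i)
    {
        const double l = (left.size() == 1) ? left[0] : left[i];
        const double r = (right.size() == 1) ? right[0] : right[i];
        out[i] = op(l, r);
    }
}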
1580  // have already been collapsed to IDENTITY. So we must have at least one expanded child.  // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1581  // unlike the other resolve helpers, we must treat these datapoints separately.  // unlike the other resolve helpers, we must treat these datapoints separately.
1582  const DataTypes::ValueType*  const DataTypes::ValueType*
1583  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset)  DataLazy::resolveNodeTProd(int tid, int sampleNo, size_t& roffset) const
1584  {  {
1585  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1586    
# Line 2214  LAZYDEBUG(cout << "m_samplesize=" << m_s Line 1612  LAZYDEBUG(cout << "m_samplesize=" << m_s
1612  LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)  LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1613  LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)  LAZYDEBUG(cout << "DPPS=" << m_right->getNumDPPSample() <<"."<<endl;)
1614    
1615    double* resultp=&(m_samples[offset]);     // results are stored at the vector offset we recieved    double* resultp=&(m_samples[offset]);     // results are stored at the vector offset we received
1616    switch(m_op)    switch(m_op)
1617    {    {
1618      case PROD:      case PROD:
# Line 2238  LAZYDEBUG(cout << DataTypes::pointToStri Line 1636  LAZYDEBUG(cout << DataTypes::pointToStri
1636    roffset=offset;    roffset=offset;
1637    return &m_samples;    return &m_samples;
1638  }  }
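Note: the PROD case contracts the small tensors held in each datapoint, which is why the comment above says the datapoints must be treated separately rather than in one flat element-wise loop. A conceptual sketch of such a per-datapoint matrix contraction follows; the row-major layout and names are illustrative and are not the escript routine.

#include <cstddef>
#include <vector>

// Contract a (rows x k) block with a (k x cols) block for every datapoint in a sample.
void sampleTensorProduct(const std::vector<double>& left,   // numPoints * rows * k
                         const std::vector<double>& right,  // numPoints * k * cols
                         std::vector<double>& out,          // numPoints * rows * cols
                         std::size_t numPoints, std::size_t rows,
                         std::size_t k, std::size_t cols)
{
    for (std::size_t p = 0; p < numPoints; ++p)
    {
        const double* A = &left[p * rows * k];
        const double* B = &right[p * k * cols];
        double* C = &out[p * rows * cols];
        for (std::size_t i = 0; i < rows; ++i)
            for (std::size_t j = 0; j < cols; ++j)
            {
                double sum = 0.0;
                for (std::size_t t = 0; t < k; ++t)
                    sum += A[i * k + t] * B[t * cols + j];
                C[i * cols + j] = sum;
            }
    }
}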
 #endif //LAZY_NODE_STORAGE  
   
 /*  
   \brief Compute the value of the expression for the given sample.  
   \return Vector which stores the value of the subexpression for the given sample.  
   \param v A vector to store intermediate results.  
   \param offset Index in v to begin storing results.  
   \param sampleNo Sample number to evaluate.  
   \param roffset (output parameter) the offset in the return vector where the result begins.  
   
   The return value will be an existing vector so do not deallocate it.  
 */  
 // the vector and the offset are a place where the method could write its data if it wishes  
 // it is not obligated to do so. For example, if it has its own storage already, it can use that.  
 // Hence the return value to indicate where the data is actually stored.  
 // Regardless, the storage should be assumed to be used, even if it isn't.  
   
 // the roffset is the offset within the returned vector where the data begins  
 const DataTypes::ValueType*  
 DataLazy::resolveVectorSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  
 {  
 LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)  
     // collapse so we have a 'E' node or an IDENTITY for some other type  
   if (m_readytype!='E' && m_op!=IDENTITY)  
   {  
     collapse();  
   }  
   if (m_op==IDENTITY)    
   {  
     const ValueType& vec=m_id->getVectorRO();  
     if (m_readytype=='C')  
     {  
     roffset=0;  
 LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)  
     return &(vec);  
     }  
     roffset=m_id->getPointOffset(sampleNo, 0);  
 LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)  
     return &(vec);  
   }  
   if (m_readytype!='E')  
   {  
     throw DataException("Programmer Error - Collapse did not produce an expanded node.");  
   }  
   switch (getOpgroup(m_op))  
   {  
   case G_UNARY:  
   case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);  
   case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);  
   case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);  
   case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);  
   case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);  
   case G_NP1OUT_2P: return resolveNP1OUT_2P(v, offset, sampleNo, roffset);  
   case G_REDUCTION: return resolveReduction(v, offset, sampleNo, roffset);  
   default:  
     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");  
   }  
1639    
 }  
1640    
1641  const DataTypes::ValueType*  const DataTypes::ValueType*
1642  DataLazy::resolveSample(BufferGroup& bg, int sampleNo, size_t& roffset)  DataLazy::resolveSample(int sampleNo, size_t& roffset) const
1643  {  {
1644  #ifdef _OPENMP  #ifdef _OPENMP
1645      int tid=omp_get_thread_num();      int tid=omp_get_thread_num();
1646  #else  #else
1647      int tid=0;      int tid=0;
1648  #endif  #endif
1649  #ifdef LAZY_NODE_STORAGE  
1650      return resolveNodeSample(tid, sampleNo, roffset);  #ifdef LAZY_STACK_PROF
1651        stackstart[tid]=&tid;
1652        stackend[tid]=&tid;
1653        const DataTypes::ValueType* r=resolveNodeSample(tid, sampleNo, roffset);
1654        size_t d=(size_t)stackstart[tid]-(size_t)stackend[tid];
1655        #pragma omp critical
1656        if (d>maxstackuse)
1657        {
1658    cout << "Max resolve Stack use " << d << endl;
1659            maxstackuse=d;
1660        }
1661        return r;
1662  #else  #else
1663      return resolveVectorSample(bg.getBuffer(tid),bg.getOffset(tid),sampleNo,roffset);      return resolveNodeSample(tid, sampleNo, roffset);
1664  #endif  #endif
1665  }  }
1666    
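Note: the LAZY_STACK_PROF branch above estimates how much stack the recursive resolve used by recording the address of a local variable in the outermost call and comparing it with the address recorded by the deepest frame reached. A stand-alone sketch of that address-difference trick follows; the names are illustrative, the result is approximate, and it relies on the usual downward-growing stack (inlining or tail-call optimisation can shrink the measured value).

#include <cstddef>
#include <iostream>

static void* stackTop = 0;      // address of a local in the outermost frame
static void* stackBottom = 0;   // address of a local in the deepest frame reached

void recurse(int depth)
{
    int marker;                 // its address tracks the current stack frame
    stackBottom = &marker;
    if (depth > 0)
    {
        recurse(depth - 1);
    }
}

int main()
{
    int marker;
    stackTop = &marker;
    recurse(100);
    std::cout << "approximate stack used: "
              << ((std::size_t)stackTop - (std::size_t)stackBottom)
              << " bytes" << std::endl;
    return 0;
}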
# Line 2320  DataLazy::resolveToIdentity() Line 1671  DataLazy::resolveToIdentity()
1671  {  {
1672     if (m_op==IDENTITY)     if (m_op==IDENTITY)
1673      return;      return;
 #ifndef LAZY_NODE_STORAGE  
    DataReady_ptr p=resolveVectorWorker();  
 #else  
1674     DataReady_ptr p=resolveNodeWorker();     DataReady_ptr p=resolveNodeWorker();
 #endif  
1675     makeIdentity(p);     makeIdentity(p);
1676  }  }
1677    
# Line 2340  void DataLazy::makeIdentity(const DataRe Line 1687  void DataLazy::makeIdentity(const DataRe
1687     else if(p->isExpanded()) {m_readytype='E';}     else if(p->isExpanded()) {m_readytype='E';}
1688     else if (p->isTagged()) {m_readytype='T';}     else if (p->isTagged()) {m_readytype='T';}
1689     else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}     else {throw DataException("Unknown DataReady instance in convertToIdentity constructor.");}
    m_buffsRequired=1;  
1690     m_samplesize=p->getNumDPPSample()*p->getNoValues();     m_samplesize=p->getNumDPPSample()*p->getNoValues();
    m_maxsamplesize=m_samplesize;  
1691     m_left.reset();     m_left.reset();
1692     m_right.reset();     m_right.reset();
1693  }  }
# Line 2355  DataLazy::resolve() Line 1700  DataLazy::resolve()
1700      return m_id;      return m_id;
1701  }  }
1702    
 #ifdef LAZY_NODE_STORAGE  
1703    
1704  // This version of resolve uses storage in each node to hold results  /* This is really a static method but I think that caused problems in windows */
1705  DataReady_ptr  void
1706  DataLazy::resolveNodeWorker()  DataLazy::resolveGroupWorker(std::vector<DataLazy*>& dats)
1707  {  {
1708    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally    if (dats.empty())
1709    {    {
1710      collapse();      return;
1711    }    }
1712    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.    vector<DataLazy*> work;
1713      FunctionSpace fs=dats[0]->getFunctionSpace();
1714      bool match=true;
1715      for (int i=dats.size()-1;i>=0;--i)
1716    {    {
1717      return m_id;      if (dats[i]->m_readytype!='E')
1718        {
1719            dats[i]->collapse();
1720        }
1721        if (dats[i]->m_op!=IDENTITY)
1722        {
1723            work.push_back(dats[i]);
1724            if (fs!=dats[i]->getFunctionSpace())
1725            {
1726                match=false;
1727            }
1728        }
1729    }    }
1730      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'    if (work.empty())
   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));  
   ValueType& resvec=result->getVectorRW();  
   DataReady_ptr resptr=DataReady_ptr(result);  
   
   int sample;  
   int totalsamples=getNumSamples();  
   const ValueType* res=0;   // Storage for answer  
 LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)  
   #pragma omp parallel for private(sample,res) schedule(static)  
   for (sample=0;sample<totalsamples;++sample)  
1731    {    {
1732      size_t roffset=0;      return;     // no work to do
1733      }
1734      if (match)    // all functionspaces match.  Yes I realise this is overly strict
1735      {     // it is possible that dats[0] is one of the objects which we discarded and
1736            // all the other functionspaces match.
1737        vector<DataExpanded*> dep;
1738        vector<ValueType*> vecs;
1739        for (int i=0;i<work.size();++i)
1740        {
1741            dep.push_back(new DataExpanded(fs,work[i]->getShape(), ValueType(work[i]->getNoValues())));
1742            vecs.push_back(&(dep[i]->getVectorRW()));
1743        }
1744        int totalsamples=work[0]->getNumSamples();
1745        const ValueType* res=0; // Storage for answer
1746        int sample;
1747        #pragma omp parallel private(sample, res)
1748        {
1749            size_t roffset=0;
1750            #pragma omp for schedule(static)
1751            for (sample=0;sample<totalsamples;++sample)
1752            {
1753            roffset=0;
1754            int j;
1755            for (j=work.size()-1;j>=0;--j)
1756            {
1757  #ifdef _OPENMP  #ifdef _OPENMP
1758      res=resolveNodeSample(omp_get_thread_num(),sample,roffset);                  res=work[j]->resolveNodeSample(omp_get_thread_num(),sample,roffset);
1759  #else  #else
1760      res=resolveNodeSample(0,sample,roffset);                  res=work[j]->resolveNodeSample(0,sample,roffset);
1761  #endif  #endif
1762  LAZYDEBUG(cout << "Sample #" << sample << endl;)                  DataVector::size_type outoffset=dep[j]->getPointOffset(sample,0);
1763  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )                  memcpy(&((*vecs[j])[outoffset]),&((*res)[roffset]),work[j]->m_samplesize*sizeof(DataVector::ElementType));
1764      DataVector::size_type outoffset=result->getPointOffset(sample,0);          }
1765      memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(DataVector::ElementType));          }
1766        }
1767        // Now we need to load the new results as identity ops into the lazy nodes
1768        for (int i=work.size()-1;i>=0;--i)
1769        {
1770            work[i]->makeIdentity(boost::dynamic_pointer_cast<DataReady>(dep[i]->getPtr()));
1771        }
1772      }
1773      else  // functionspaces do not match
1774      {
1775        for (int i=0;i<work.size();++i)
1776        {
1777            work[i]->resolveToIdentity();
1778        }
1779    }    }
   return resptr;  
1780  }  }
1781    
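Note: resolveGroupWorker above batches several lazy expressions that share a FunctionSpace into one parallel sweep over samples, so the sample loop (and its OpenMP overhead) is paid once for the whole group; when the function spaces differ it simply resolves each expression on its own. A simplified sketch of the batching pattern follows, with stand-in types (PendingExpr, resolveGroup) instead of DataLazy and DataExpanded.

#include <cstddef>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif

// Stand-in for a lazy expression awaiting resolution: it knows its sample size and
// owns a flat output buffer (the real code writes into a DataExpanded vector).
struct PendingExpr
{
    std::size_t sampleSize;
    std::vector<double> result;

    // Evaluate one sample; here we just fill it with a recognisable value.
    void evalSample(std::size_t sampleNo, double* out) const
    {
        for (std::size_t i = 0; i < sampleSize; ++i)
        {
            out[i] = static_cast<double>(sampleNo);
        }
    }
};

// Resolve every expression in 'work' inside a single parallel loop over samples,
// so each sample is visited once for the whole batch.
void resolveGroup(std::vector<PendingExpr>& work, std::size_t totalSamples)
{
    #pragma omp parallel
    {
        #pragma omp for schedule(static)
        for (long sample = 0; sample < static_cast<long>(totalSamples); ++sample)
        {
            for (std::size_t j = 0; j < work.size(); ++j)
            {
                double* out = &work[j].result[sample * work[j].sampleSize];
                work[j].evalSample(static_cast<std::size_t>(sample), out);
            }
        }
    }
}

The caller is assumed to have sized each result buffer to totalSamples * sampleSize beforehand, just as resolveGroupWorker allocates its DataExpanded outputs before entering the parallel region.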
 #endif // LAZY_NODE_STORAGE  
1782    
1783  // To simplify the memory management, all threads operate on one large vector, rather than one each.  
1784  // Each sample is evaluated independently and copied into the result DataExpanded.  // This version of resolve uses storage in each node to hold results
1785  DataReady_ptr  DataReady_ptr
1786  DataLazy::resolveVectorWorker()  DataLazy::resolveNodeWorker()
1787  {  {
   
 LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)  
 LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)  
1788    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally
1789    {    {
1790      collapse();      collapse();
# Line 2414  LAZYDEBUG(cout << "Buffers=" << m_buffsR Line 1794  LAZYDEBUG(cout << "Buffers=" << m_buffsR
1794      return m_id;      return m_id;
1795    }    }
1796      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
   size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough  
     // storage to evaluate its expression  
   int numthreads=1;  
 #ifdef _OPENMP  
   numthreads=omp_get_max_threads();  
 #endif  
   ValueType v(numthreads*threadbuffersize);  
 LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)  
1797    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
1798    ValueType& resvec=result->getVectorRW();    ValueType& resvec=result->getVectorRW();
1799    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
1800    
1801    int sample;    int sample;
   size_t outoffset;     // offset in the output data  
1802    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1803    const ValueType* res=0;   // Vector storing the answer    const ValueType* res=0;   // Storage for answer
   size_t resoffset=0;       // where in the vector to find the answer  
1804  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1805    #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)    #pragma omp parallel private(sample,res)
   for (sample=0;sample<totalsamples;++sample)  
1806    {    {
1807  LAZYDEBUG(cout << "################################# " << sample << endl;)      size_t roffset=0;
1808    #ifdef LAZY_STACK_PROF
1809        stackstart[omp_get_thread_num()]=&roffset;
1810        stackend[omp_get_thread_num()]=&roffset;
1811    #endif
1812        #pragma omp for schedule(static)
1813        for (sample=0;sample<totalsamples;++sample)
1814        {
1815            roffset=0;
1816  #ifdef _OPENMP  #ifdef _OPENMP
1817      res=resolveVectorSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);              res=resolveNodeSample(omp_get_thread_num(),sample,roffset);
1818  #else  #else
1819      res=resolveVectorSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.              res=resolveNodeSample(0,sample,roffset);
1820  #endif  #endif
1821  LAZYDEBUG(cerr << "-------------------------------- " << endl;)  LAZYDEBUG(cout << "Sample #" << sample << endl;)
1822  LAZYDEBUG(cerr<< "Copying sample#" << sample << endl;)  LAZYDEBUG(cout << "Final res[" << roffset<< "]=" << (*res)[roffset] << (*res)[roffset]<< endl; )
1823      outoffset=result->getPointOffset(sample,0);              DataVector::size_type outoffset=result->getPointOffset(sample,0);
1824  LAZYDEBUG(cerr << "offset=" << outoffset << " from offset=" << resoffset << " " << m_samplesize << " doubles" << endl;)              memcpy(&(resvec[outoffset]),&((*res)[roffset]),m_samplesize*sizeof(DataVector::ElementType));
1825      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector      }
     {  
 LAZYDEBUG(cerr << "outoffset=" << outoffset << " resoffset=" << resoffset << " " << (*res)[resoffset]<< endl;)  
     resvec[outoffset]=(*res)[resoffset];  
     }  
 LAZYDEBUG(cerr << DataTypes::pointToString(resvec,getShape(),outoffset-m_samplesize+DataTypes::noValues(getShape()),"Final result:") << endl;)  
 LAZYDEBUG(cerr << "*********************************" << endl;)  
1826    }    }
1827    #ifdef LAZY_STACK_PROF
1828      for (int i=0;i<getNumberOfThreads();++i)
1829      {
1830        size_t r=((size_t)stackstart[i] - (size_t)stackend[i]);
1831    //  cout << i << " " << stackstart[i] << " .. " << stackend[i] << " = " <<  r << endl;
1832        if (r>maxstackuse)
1833        {
1834            maxstackuse=r;
1835        }
1836      }
1837      cout << "Max resolve Stack use=" << maxstackuse << endl;
1838    #endif
1839    return resptr;    return resptr;
1840  }  }
1841    
# Line 2459  std::string Line 1843  std::string
1843  DataLazy::toString() const  DataLazy::toString() const
1844  {  {
1845    ostringstream oss;    ostringstream oss;
1846    oss << "Lazy Data:";    oss << "Lazy Data: [depth=" << m_height<< "] ";
1847    if (escriptParams.getPRINT_LAZY_TREE()==0)    switch (escriptParams.getLAZY_STR_FMT())
   {  
       intoString(oss);  
   }  
   else  
1848    {    {
1849      case 1:   // tree format
1850      oss << endl;      oss << endl;
1851      intoTreeString(oss,"");      intoTreeString(oss,"");
1852        break;
1853      case 2:   // just the depth
1854        break;
1855      default:
1856        intoString(oss);
1857        break;
1858    }    }
1859    return oss.str();    return oss.str();
1860  }  }
# Line 2527  DataLazy::intoString(ostringstream& oss) Line 1914  DataLazy::intoString(ostringstream& oss)
1914      oss << ", " << m_axis_offset << ", " << m_transpose;      oss << ", " << m_axis_offset << ", " << m_transpose;
1915      oss << ')';      oss << ')';
1916      break;      break;
1917      case G_CONDEVAL:
1918        oss << opToString(m_op)<< '(' ;
1919        m_mask->intoString(oss);
1920        oss << " ? ";
1921        m_left->intoString(oss);
1922        oss << " : ";
1923        m_right->intoString(oss);
1924        oss << ')';
1925        break;
1926    default:    default:
1927      oss << "UNKNOWN";      oss << "UNKNOWN";
1928    }    }
# Line 2699  DataLazy::setToZero() Line 2095  DataLazy::setToZero()
2095  //   m_readytype='C';  //   m_readytype='C';
2096  //   m_buffsRequired=1;  //   m_buffsRequired=1;
2097    
2098    privdebug=privdebug;  // to stop the compiler complaining about unused privdebug    (void)privdebug;  // to stop the compiler complaining about unused privdebug
2099    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");    throw DataException("Programmer error - setToZero not supported for DataLazy (DataLazy objects should be read only).");
2100  }  }
2101    
