
Diff of /trunk/escript/src/DataLazy.cpp


revision 1898 by jfenwick, Mon Oct 20 01:20:18 2008 UTC → revision 1943 by jfenwick, Wed Oct 29 04:05:14 2008 UTC
# Line 27  Line 27 
27  #include "Data.h"  #include "Data.h"
28  #include "UnaryFuncs.h"     // for escript::fsign  #include "UnaryFuncs.h"     // for escript::fsign
29    
30    /*
31    How does DataLazy work?
32    ~~~~~~~~~~~~~~~~~~~~~~~
33    
34    Each instance represents a single operation on one or two other DataLazy instances. These arguments are normally
35    denoted left and right.
36    
37    A special operation, IDENTITY, stores an instance of DataReady in the m_id member.
38    This means that all "internal" nodes in the structure are instances of DataLazy.
39    
40    Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
41    Note that IDENTITY is not considered a unary operation.
42    
43    I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
44    It must however form a DAG (directed acyclic graph).
45    I will refer to individual DataLazy objects within the structure as nodes.
46    
47    Each node also stores:
48    - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
49        evaluated.
50    - m_length ~ how many values would be stored in the answer if the expression was evaluated.
51    - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
52        evaluate the expression.
53    - m_samplesize ~ the number of doubles stored in a sample.
54    
55    When a new node is created, the above values are computed based on the values in the child nodes.
56    Eg: if left requires 4 samples and right requires 6 then left+right requires 7 samples.
57    
58    The resolve method, which produces a DataReady from a DataLazy, does the following:
59    1) Create a DataReady to hold the new result.
60    2) Allocate a vector (v) big enough to hold m_buffsrequired samples.
61    3) For each sample, call resolveSample with v, to get its values and copy them into the result object.
62    
63    (In the case of OpenMP, multiple samples are resolved in parallel so the vector needs to be larger.)
64    
65    resolveSample returns a Vector* and an offset within that vector where the result is stored.
66    Normally, this would be v, but for identity nodes their internal vector is returned instead.
67    
68    The convention that I use is that the resolve methods should store their results starting at the offset they are passed.
69    
70    For expressions which evaluate to Constant or Tagged, there is a different evaluation method.
71    The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.
72    */
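
To make the structure described above concrete, here is a minimal standalone sketch (hypothetical names and types, not the escript classes): leaves are IDENTITY nodes holding ready values in the role of m_id, internal nodes hold an operation plus child pointers, and a readytype character is derived from the children. The combination rule shown is only an illustrative assumption.

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-in for DataLazy; not the escript API.
struct LazyNode;
typedef std::shared_ptr<LazyNode> LazyPtr;

struct LazyNode
{
    char op;                   // '=' stands for IDENTITY; '+', '*', ... for real operations
    char readytype;            // 'C'onstant, 'T'agged or 'E'xpanded
    std::vector<double> id;    // plays the role of m_id: only used by IDENTITY nodes
    LazyPtr left, right;       // children; shared nodes make this a DAG, not necessarily a tree
};

// An IDENTITY leaf wrapping some ready data.
LazyPtr leaf(std::vector<double> v, char readytype)
{
    return LazyPtr(new LazyNode{'=', readytype, v, LazyPtr(), LazyPtr()});
}

// A binary node; the result is expanded if either child is expanded (an assumed rule).
LazyPtr binary(char op, LazyPtr l, LazyPtr r)
{
    char rt = (l->readytype=='E' || r->readytype=='E') ? 'E'
            : (l->readytype=='T' || r->readytype=='T') ? 'T' : 'C';
    return LazyPtr(new LazyNode{op, rt, std::vector<double>(), l, r});
}

int main()
{
    LazyPtr a = leaf(std::vector<double>(3, 1.0), 'E');
    LazyPtr c = binary('+', a, a);           // c = a + a : both children are the same node
    std::cout << c->readytype << std::endl;  // prints E
    return 0;
}

Because c reuses the node a twice, the example also shows why the structure is a DAG rather than a tree.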
73    
74    
75  using namespace std;  using namespace std;
76  using namespace boost;  using namespace boost;
77    
# Line 39  opToString(ES_optype op); Line 84  opToString(ES_optype op);
84  namespace  namespace
85  {  {
86    
   
   
87  enum ES_opgroup  enum ES_opgroup
88  {  {
89     G_UNKNOWN,     G_UNKNOWN,
90     G_IDENTITY,     G_IDENTITY,
91     G_BINARY,     G_BINARY,        // pointwise operations with two arguments
92     G_UNARY     G_UNARY      // pointwise operations with one argument
93  };  };
94    
95    
96    
97    
98  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","sin","cos","tan",  string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",
99                "sin","cos","tan",
100              "asin","acos","atan","sinh","cosh","tanh","erf",              "asin","acos","atan","sinh","cosh","tanh","erf",
101              "asinh","acosh","atanh",              "asinh","acosh","atanh",
102              "log10","log","sign","abs","neg","pos","exp","sqrt",              "log10","log","sign","abs","neg","pos","exp","sqrt",
103              "1/","where>0","where<0","where>=0","where<=0"};              "1/","where>0","where<0","where>=0","where<=0"};
104  int ES_opcount=32;  int ES_opcount=33;
105  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY,G_UNARY,G_UNARY,G_UNARY, //9  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
106              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 16              G_UNARY,G_UNARY,G_UNARY, //10
107              G_UNARY,G_UNARY,G_UNARY,                    // 19              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17
108              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 27              G_UNARY,G_UNARY,G_UNARY,                    // 20
109                G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28
110              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY};              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY};
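
The string and group arrays above are parallel arrays indexed by ES_optype, which is why this revision bumps ES_opcount from 32 to 33 when the power operator is added. The body of getOpgroup is cut off by the hunk, but the lookup pattern it presumably relies on can be sketched in isolation with hypothetical names:

#include <string>

// Hypothetical miniature of the parallel-array pattern; not the real ES_optype values.
enum MiniOp { OP_UNKNOWN, OP_IDENTITY, OP_ADD, OP_POW, OP_SIN, OP_COUNT };
enum MiniGroup { GRP_UNKNOWN, GRP_IDENTITY, GRP_BINARY, GRP_UNARY };

const std::string miniStrings[OP_COUNT] = {"UNKNOWN", "IDENTITY", "+", "^", "sin"};
const MiniGroup  miniGroups[OP_COUNT]   = {GRP_UNKNOWN, GRP_IDENTITY, GRP_BINARY, GRP_BINARY, GRP_UNARY};

// Both lookups are simple indexed reads; adding an operator means updating the enum,
// both arrays and the count together.
inline MiniGroup getMiniGroup(MiniOp op)            { return miniGroups[op]; }
inline const std::string& miniOpToString(MiniOp op) { return miniStrings[op]; }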
111  inline  inline
112  ES_opgroup  ES_opgroup
# Line 79  resultFS(DataAbstract_ptr left, DataAbst Line 124  resultFS(DataAbstract_ptr left, DataAbst
124      // that way, if interpolate is required in any other op we can just throw a      // that way, if interpolate is required in any other op we can just throw a
125      // programming error exception.      // programming error exception.
126    
127      FunctionSpace l=left->getFunctionSpace();
128      if (left->getFunctionSpace()!=right->getFunctionSpace())    FunctionSpace r=right->getFunctionSpace();
129      {    if (l!=r)
130          throw DataException("FunctionSpaces not equal - interpolation not supported on lazy data.");    {
131      }      if (r.probeInterpolation(l))
132      return left->getFunctionSpace();      {
133        return l;
134        }
135        if (l.probeInterpolation(r))
136        {
137        return r;
138        }
139        throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
140      }
141      return l;
142  }  }
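
The reworked resultFS above now allows one argument to be interpolated onto the other's FunctionSpace instead of simply rejecting a mismatch. The same symmetric probe can be sketched with a stand-in Space type; probeInterpolation(dst) is given a hypothetical meaning here ("data on this space can be moved onto dst") purely for illustration:

#include <stdexcept>
#include <string>

// Stand-in for FunctionSpace; the interpolation rule here is arbitrary.
struct Space
{
    int level;
    bool operator!=(const Space& o) const { return level != o.level; }
    // Hypothetical: data on this space can be interpolated onto dst if dst is "finer".
    bool probeInterpolation(const Space& dst) const { return dst.level >= level; }
};

// Mirrors the shape of resultFS: prefer no interpolation, otherwise try each direction.
Space resultSpace(const Space& l, const Space& r, const std::string& opname)
{
    if (l != r)
    {
        if (r.probeInterpolation(l)) { return l; }  // right argument will be brought to l
        if (l.probeInterpolation(r)) { return r; }  // left argument will be brought to r
        throw std::runtime_error("Cannot interpolate between the spaces given for " + opname + ".");
    }
    return l;
}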
143    
144  // return the shape of the result of "left op right"  // return the shape of the result of "left op right"
# Line 93  resultShape(DataAbstract_ptr left, DataA Line 147  resultShape(DataAbstract_ptr left, DataA
147  {  {
148      if (left->getShape()!=right->getShape())      if (left->getShape()!=right->getShape())
149      {      {
150          throw DataException("Shapes not the same - shapes must match for lazy data.");        if (getOpgroup(op)!=G_BINARY)
151          {
152            throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
153          }
154          if (left->getRank()==0)   // we need to allow scalar * anything
155          {
156            return right->getShape();
157          }
158          if (right->getRank()==0)
159          {
160            return left->getShape();
161          }
162          throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
163      }      }
164      return left->getShape();      return left->getShape();
165  }  }
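
The broadcast rule introduced here (shapes must match exactly, except that a rank-0 scalar may combine with any shape) can be sketched on its own with plain std::vector<int> shapes, which is essentially what DataTypes::ShapeType is:

#include <stdexcept>
#include <vector>

typedef std::vector<int> ShapeType;   // a shape is a list of dimensions; rank 0 means scalar

ShapeType broadcastShape(const ShapeType& left, const ShapeType& right)
{
    if (left != right)
    {
        if (left.empty())  { return right; }   // scalar op tensor -> use the tensor's shape
        if (right.empty()) { return left; }    // tensor op scalar -> use the tensor's shape
        throw std::invalid_argument("arguments must have matching shapes or one must be a scalar");
    }
    return left;
}

// e.g. broadcastShape(ShapeType(), ShapeType(2, 3)) yields a 3x3 shape, while
// combining a length-2 vector with a length-3 vector throws.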
166    
167    // determine the number of points in the result of "left op right"
168  size_t  size_t
169  resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
170  {  {
# Line 110  resultLength(DataAbstract_ptr left, Data Line 177  resultLength(DataAbstract_ptr left, Data
177     }     }
178  }  }
179    
180    // determine the number of samples required to evaluate an expression combining left and right
181  int  int
182  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
183  {  {
# Line 123  calcBuffs(const DataLazy_ptr& left, cons Line 191  calcBuffs(const DataLazy_ptr& left, cons
191     }     }
192  }  }
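
calcBuffs itself is elided by this hunk. The header comment's example (left requires 4 samples, right requires 6, left+right requires 7) suggests a max-plus-one rule; the sketch below encodes that rule purely as an assumption:

#include <algorithm>

// Assumed rule only: a binary node needs the larger of its children's buffer counts
// plus one extra buffer for its own intermediate sample.
int calcBuffsSketch(int leftBuffs, int rightBuffs)
{
    return std::max(leftBuffs, rightBuffs) + 1;
}
// calcBuffsSketch(4, 6) == 7, matching the example in the header comment.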
193    
194    
195  }   // end anonymous namespace  }   // end anonymous namespace
196    
197    
198    
199    // Return a string representing the operation
200  const std::string&  const std::string&
201  opToString(ES_optype op)  opToString(ES_optype op)
202  {  {
# Line 143  DataLazy::DataLazy(DataAbstract_ptr p) Line 214  DataLazy::DataLazy(DataAbstract_ptr p)
214  {  {
215     if (p->isLazy())     if (p->isLazy())
216     {     {
     // TODO: fix this.   We could make the new node a copy of p?  
217      // I don't want identity of Lazy.      // I don't want identity of Lazy.
218      // Question: Why would that be so bad?      // Question: Why would that be so bad?
219      // Answer: We assume that the child of ID is something we can call getVector on      // Answer: We assume that the child of ID is something we can call getVector on
# Line 163  DataLazy::DataLazy(DataAbstract_ptr p) Line 233  DataLazy::DataLazy(DataAbstract_ptr p)
233  cout << "(1)Lazy created with " << m_samplesize << endl;  cout << "(1)Lazy created with " << m_samplesize << endl;
234  }  }
235    
236    
237    
238    
239  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
240      : parent(left->getFunctionSpace(),left->getShape()),      : parent(left->getFunctionSpace(),left->getShape()),
241      m_op(op)      m_op(op)
# Line 188  DataLazy::DataLazy(DataAbstract_ptr left Line 261  DataLazy::DataLazy(DataAbstract_ptr left
261  }  }
262    
263    
264  // DataLazy::DataLazy(DataLazy_ptr left, DataLazy_ptr right, ES_optype op)  // In this constructor we need to consider interpolation
 //  : parent(resultFS(left,right,op), resultShape(left,right,op)),  
 //  m_left(left),  
 //  m_right(right),  
 //  m_op(op)  
 // {  
 //    if (getOpgroup(op)!=G_BINARY)  
 //    {  
 //  throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");  
 //    }  
 //    m_length=resultLength(m_left,m_right,m_op);  
 //    m_samplesize=getNumDPPSample()*getNoValues();  
 //    m_buffsRequired=calcBuffs(m_left, m_right, m_op);  
 // cout << "(2)Lazy created with " << m_samplesize << endl;  
 // }  
   
265  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
266      : parent(resultFS(left,right,op), resultShape(left,right,op)),      : parent(resultFS(left,right,op), resultShape(left,right,op)),
267      m_op(op)      m_op(op)
# Line 212  DataLazy::DataLazy(DataAbstract_ptr left Line 270  DataLazy::DataLazy(DataAbstract_ptr left
270     {     {
271      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
272     }     }
273     if (left->isLazy())  
274       if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
275       {
276        FunctionSpace fs=getFunctionSpace();
277        Data ltemp(left);
278        Data tmp(ltemp,fs);
279        left=tmp.borrowDataPtr();
280       }
281       if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
282       {
283        Data tmp(Data(right),getFunctionSpace());
284        right=tmp.borrowDataPtr();
285       }
286       left->operandCheck(*right);
287    
288       if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required
289     {     {
290      m_left=dynamic_pointer_cast<DataLazy>(left);      m_left=dynamic_pointer_cast<DataLazy>(left);
291     }     }
# Line 243  DataLazy::DataLazy(DataAbstract_ptr left Line 316  DataLazy::DataLazy(DataAbstract_ptr left
316      m_readytype='C';      m_readytype='C';
317     }     }
318     m_length=resultLength(m_left,m_right,m_op);     m_length=resultLength(m_left,m_right,m_op);
319     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();    
320     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_buffsRequired=calcBuffs(m_left, m_right,m_op);
321  cout << "(3)Lazy created with " << m_samplesize << endl;  cout << "(3)Lazy created with " << m_samplesize << endl;
322  }  }
# Line 261  DataLazy::getBuffsRequired() const Line 334  DataLazy::getBuffsRequired() const
334  }  }
335    
336    
337    /*
338      \brief Evaluates the expression using methods on Data.
339      This does the work for the collapse method.
340      For reasons of efficiency do not call this method on DataExpanded nodes.
341    */
342  DataReady_ptr  DataReady_ptr
343  DataLazy::collapseToReady()  DataLazy::collapseToReady()
344  {  {
# Line 380  DataLazy::collapseToReady() Line 458  DataLazy::collapseToReady()
458    return result.borrowReadyPtr();    return result.borrowReadyPtr();
459  }  }
460    
461    /*
462       \brief Converts the DataLazy into an IDENTITY storing the value of the expression.
463       This method uses the original methods on the Data class to evaluate the expressions.
464       For this reason, it should not be used on DataExpanded instances. (To do so would defeat
465       the purpose of using DataLazy in the first place).
466    */
467  void  void
468  DataLazy::collapse()  DataLazy::collapse()
469  {  {
# Line 395  DataLazy::collapse() Line 479  DataLazy::collapse()
479    m_op=IDENTITY;    m_op=IDENTITY;
480  }  }
481    
482    /*
483      \brief Compute the value of the expression (binary operation) for the given sample.
484      \return Vector which stores the value of the subexpression for the given sample.
485      \param v A vector to store intermediate results.
486      \param offset Index in v to begin storing results.
487      \param sampleNo Sample number to evaluate.
488      \param roffset (output parameter) the offset in the return vector where the result begins.
489    
490      The return value will be an existing vector so do not deallocate it.
491      If the result is stored in v it should be stored at the offset given.
492      Everything from offset to the end of v should be considered available for this method to use.
493    */
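
The offset/roffset contract documented above can be illustrated with plain vectors. The helper below is hypothetical (the real resolveUnary also dispatches on m_op and checks m_readytype); it simply writes into the scratch vector at the offset it was given, reports through roffset where the result starts, and returns a pointer to the vector that actually holds it:

#include <cmath>
#include <cstddef>
#include <vector>

typedef std::vector<double> ValueType;

// Toy "unary resolve": apply sin() to one sample of the child's values.
// The caller owns v; the returned pointer must not be deallocated.
const ValueType* resolveSinSketch(const ValueType& childVals, size_t childOffset,
                                  size_t sampleSize,
                                  ValueType& v, size_t offset, size_t& roffset)
{
    for (size_t i = 0; i < sampleSize; ++i)
    {
        v[offset + i] = std::sin(childVals[childOffset + i]);
    }
    roffset = offset;   // the result starts exactly where we were asked to write
    return &v;
}

// An IDENTITY node, by contrast, would leave v untouched, return a pointer to its own
// internal vector and set roffset to the sample's offset within that vector.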
494  DataTypes::ValueType*  DataTypes::ValueType*
495  DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
496  {  {
# Line 516  DataLazy::resolveUnary(ValueType& v, siz Line 612  DataLazy::resolveUnary(ValueType& v, siz
612    
613    
614    
615  // const double*  
 // DataLazy::resolveUnary(ValueType& v,int sampleNo,  size_t offset) const  
 // {  
 //  // we assume that any collapsing has been done before we get here  
 //  // since we only have one argument we don't need to think about only  
 //  // processing single points.  
 //   if (m_readytype!='E')  
 //   {  
 //     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");  
 //   }  
 //   const double* left=m_left->resolveSample(v,sampleNo,offset);  
 //   double* result=&(v[offset]);  
 //   switch (m_op)  
 //   {  
 //     case SIN:      
 //  tensor_unary_operation(m_samplesize, left, result, ::sin);  
 //  break;  
 //     case COS:  
 //  tensor_unary_operation(m_samplesize, left, result, ::cos);  
 //  break;  
 //     case TAN:  
 //  tensor_unary_operation(m_samplesize, left, result, ::tan);  
 //  break;  
 //     case ASIN:  
 //  tensor_unary_operation(m_samplesize, left, result, ::asin);  
 //  break;  
 //     case ACOS:  
 //  tensor_unary_operation(m_samplesize, left, result, ::acos);  
 //  break;  
 //     case ATAN:  
 //  tensor_unary_operation(m_samplesize, left, result, ::atan);  
 //  break;  
 //     case SINH:  
 //  tensor_unary_operation(m_samplesize, left, result, ::sinh);  
 //  break;  
 //     case COSH:  
 //  tensor_unary_operation(m_samplesize, left, result, ::cosh);  
 //  break;  
 //     case TANH:  
 //  tensor_unary_operation(m_samplesize, left, result, ::tanh);  
 //  break;  
 //     case ERF:  
 // #ifdef _WIN32  
 //  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");  
 // #else  
 //  tensor_unary_operation(m_samplesize, left, result, ::erf);  
 //  break;  
 // #endif  
 //    case ASINH:  
 // #ifdef _WIN32  
 //  tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);  
 // #else  
 //  tensor_unary_operation(m_samplesize, left, result, ::asinh);  
 // #endif    
 //  break;  
 //    case ACOSH:  
 // #ifdef _WIN32  
 //  tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);  
 // #else  
 //  tensor_unary_operation(m_samplesize, left, result, ::acosh);  
 // #endif    
 //  break;  
 //    case ATANH:  
 // #ifdef _WIN32  
 //  tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);  
 // #else  
 //  tensor_unary_operation(m_samplesize, left, result, ::atanh);  
 // #endif    
 //  break;  
 //     case LOG10:  
 //  tensor_unary_operation(m_samplesize, left, result, ::log10);  
 //  break;  
 //     case LOG:  
 //  tensor_unary_operation(m_samplesize, left, result, ::log);  
 //  break;  
 //     case SIGN:  
 //  tensor_unary_operation(m_samplesize, left, result, escript::fsign);  
 //  break;  
 //     case ABS:  
 //  tensor_unary_operation(m_samplesize, left, result, ::fabs);  
 //  break;  
 //     case NEG:  
 //  tensor_unary_operation(m_samplesize, left, result, negate<double>());  
 //  break;  
 //     case POS:  
 //  // it doesn't mean anything for delayed.  
 //  // it will just trigger a deep copy of the lazy object  
 //  throw DataException("Programmer error - POS not supported for lazy data.");  
 //  break;  
 //     case EXP:  
 //  tensor_unary_operation(m_samplesize, left, result, ::exp);  
 //  break;  
 //     case SQRT:  
 //  tensor_unary_operation(m_samplesize, left, result, ::sqrt);  
 //  break;  
 //     case RECIP:  
 //  tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));  
 //  break;  
 //     case GZ:  
 //  tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));  
 //  break;  
 //     case LZ:  
 //  tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));  
 //  break;  
 //     case GEZ:  
 //  tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));  
 //  break;  
 //     case LEZ:  
 //  tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));  
 //  break;  
 //  
 //     default:  
 //  throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");  
 //   }  
 //   return result;  
 // }  
616    
617  #define PROC_OP(X) \  #define PROC_OP(X) \
618      for (int i=0;i<steps;++i,resultp+=getNoValues()) \      for (int i=0;i<steps;++i,resultp+=resultStep) \
619      { \      { \
620  cout << "Step#" << i << " chunk=" << chunksize << endl; \         tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
621  cout << left[0] << left[1] << left[2] << endl; \         lroffset+=leftStep; \
622  cout << right[0] << right[1] << right[2] << endl; \         rroffset+=rightStep; \
        tensor_binary_operation(chunksize, left, right, resultp, X); \  
        left+=leftStep; \  
        right+=rightStep; \  
 cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \  
623      }      }
624    
625    /*
626      \brief Compute the value of the expression (binary operation) for the given sample.
627      \return Vector which stores the value of the subexpression for the given sample.
628      \param v A vector to store intermediate results.
629      \param offset Index in v to begin storing results.
630      \param sampleNo Sample number to evaluate.
631      \param roffset (output parameter) the offset in the return vector where the result begins.
632    
633      The return value will be an existing vector so do not deallocate it.
634      If the result is stored in v it should be stored at the offset given.
635      Everything from offset to the end of v should be considered available for this method to use.
636    */
637    // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
638    // have already been collapsed to IDENTITY. So we must have at least one expanded child.
639    // If both children are expanded, then we can process them in a single operation (we treat
640    // the whole sample as one big datapoint).
641    // If one of the children is not expanded, then we need to treat each point in the sample
642    // individually.
643    // There is an additional complication when scalar operations are considered.
644    // For example, 2+Vector.
645    // In this case each double within the point is treated individually
646  DataTypes::ValueType*  DataTypes::ValueType*
647  DataLazy::resolveBinary(ValueType& v,  size_t offset ,int sampleNo, size_t& roffset) const  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const
648  {  {
     // again we assume that all collapsing has already been done  
     // so we have at least one expanded child.  
     // however, we could still have one of the children being not expanded.  
   
649  cout << "Resolve binary: " << toString() << endl;  cout << "Resolve binary: " << toString() << endl;
650    
651    size_t lroffset=0, rroffset=0;    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors
652        // first work out which of the children are expanded
653    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
654    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
655    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?
656    int steps=(bigloops?1:getNumDPPSample());    int steps=(bigloops?1:getNumDPPSample());
657    size_t chunksize=(bigloops? m_samplesize : getNoValues());    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint
658    int leftStep=((leftExp && !rightExp)? getNoValues() : 0);    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops
659    int rightStep=((rightExp && !leftExp)? getNoValues() : 0);    {
660        EsysAssert((m_left->getRank()==0) || (m_right->getRank()==0), "Error - Ranks must match unless one is 0.");
661        steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());
662        chunksize=1;    // for scalar
663      }    
664      int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);
665      int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);
666      int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0
667        // Get the values of sub-expressions
668    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
669    const ValueType* right=m_right->resolveSample(v,offset,sampleNo,rroffset);        const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note
670      // now we need to know which args are expanded      // the right child starts further along.
671  cout << "left=" << left << " right=" << right << endl;    double* resultp=&(v[offset]);     // results are stored at the vector offset we received
 cout << "(Length) l=" << left->size() << " r=" << right->size() << " res=" << v.size() << endl;  
   double* resultp=&(v[offset]);  
672    switch(m_op)    switch(m_op)
673    {    {
674      case ADD:      case ADD:
675      for (int i=0;i<steps;++i,resultp+=getNoValues())      PROC_OP(plus<double>());
676      {      break;
677  cerr << "Step#" << i << " chunk=" << chunksize << endl;      case SUB:
678  cerr << left << "[" << lroffset << "] " << right << "[" << rroffset << "]" << endl;      PROC_OP(minus<double>());
679         tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, plus<double>());      break;
680         lroffset+=leftStep;      case MUL:
681         rroffset+=rightStep;      PROC_OP(multiplies<double>());
682  cerr << "left=" << lroffset << " right=" << rroffset << endl;      break;
683      }      case DIV:
684        PROC_OP(divides<double>());
685        break;
686        case POW:
687        PROC_OP(::pow);
688      break;      break;
 // need to fill in the rest  
689      default:      default:
690      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
691    }    }
692    roffset=offset;    roffset=offset;  
693    return &v;    return &v;
694  }  }
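
The stepping scheme used above (one big chunk when both children are expanded, a per-datapoint loop when only one of them is, and a chunk size of 1 when a rank-0 scalar meets a tensor) can be shown in isolation. The sketch below is a simplification with plain vectors and an assumed stand-in for tensor_binary_operation; it covers only the first two cases and assumes at least one side varies per datapoint:

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

// Stand-in for tensor_binary_operation: apply op element-wise to two chunks.
template <typename BinOp>
void chunkOp(size_t n, const double* l, const double* r, double* out, BinOp op)
{
    for (size_t i = 0; i < n; ++i) { out[i] = op(l[i], r[i]); }
}

// left/right hold either one datapoint (noValues doubles) or a whole sample
// (dpps*noValues doubles) depending on whether that side varies per datapoint.
void addSampleSketch(const std::vector<double>& left,  bool leftVaries,
                     const std::vector<double>& right, bool rightVaries,
                     size_t dpps, size_t noValues, std::vector<double>& result)
{
    bool bigloop      = leftVaries && rightVaries;             // treat the sample as one datapoint
    size_t steps      = bigloop ? 1 : dpps;
    size_t chunksize  = bigloop ? dpps * noValues : noValues;
    size_t leftStep   = (leftVaries  && !rightVaries) ? noValues : 0;
    size_t rightStep  = (rightVaries && !leftVaries)  ? noValues : 0;
    size_t resultStep = std::max(leftStep, rightStep);          // at most one of them is non-zero

    size_t lo = 0, ro = 0, out = 0;
    for (size_t i = 0; i < steps; ++i)
    {
        chunkOp(chunksize, &left[lo], &right[ro], &result[out], std::plus<double>());
        lo  += leftStep;
        ro  += rightStep;
        out += resultStep;
    }
}

The scalar case would follow the same loop with chunksize dropped to 1 and the step count multiplied by the larger operand's number of values, as the method above does.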
695    
696    
697    
698  // #define PROC_OP(X) \  /*
699  //  for (int i=0;i<steps;++i,resultp+=getNoValues()) \    \brief Compute the value of the expression for the given sample.
700  //  { \    \return Vector which stores the value of the subexpression for the given sample.
701  // cout << "Step#" << i << " chunk=" << chunksize << endl; \    \param v A vector to store intermediate results.
702  // cout << left[0] << left[1] << left[2] << endl; \    \param offset Index in v to begin storing results.
703  // cout << right[0] << right[1] << right[2] << endl; \    \param sampleNo Sample number to evaluate.
704  //     tensor_binary_operation(chunksize, left, right, resultp, X); \    \param roffset (output parameter) the offset in the return vector where the result begins.
 //     left+=leftStep; \  
 //     right+=rightStep; \  
 // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \  
 //  }  
 //  
 // const double*  
 // DataLazy::resolveBinary(ValueType& v,int sampleNo,  size_t offset) const  
 // {  
 //  // again we assume that all collapsing has already been done  
 //  // so we have at least one expanded child.  
 //  // however, we could still have one of the children being not expanded.  
 //  
 // cout << "Resolve binary: " << toString() << endl;  
 //  
 //   const double* left=m_left->resolveSample(v,sampleNo,offset);  
 // // cout << "Done Left " << /*left[0] << left[1] << left[2] << */endl;  
 //   const double* right=m_right->resolveSample(v,sampleNo,offset);  
 // // cout << "Done Right"  << /*right[0] << right[1] << right[2] <<*/ endl;  
 //      // now we need to know which args are expanded  
 //   bool leftExp=(m_left->m_readytype=='E');  
 //   bool rightExp=(m_right->m_readytype=='E');  
 //   bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));  // is processing in single step  
 //   int steps=(bigloops?1:getNumSamples());  
 //   size_t chunksize=(bigloops? m_samplesize : getNoValues());  
 //   int leftStep=((leftExp && !rightExp)? getNoValues() : 0);  
 //   int rightStep=((rightExp && !leftExp)? getNoValues() : 0);  
 // cout << "left=" << left << " right=" << right << endl;  
 //   double* result=&(v[offset]);  
 //   double* resultp=result;  
 //   switch(m_op)  
 //   {  
 //     case ADD:  
 //  for (int i=0;i<steps;++i,resultp+=getNoValues())  
 //  {  
 // cout << "Step#" << i << " chunk=" << chunksize << endl; \  
 // // cout << left[0] << left[1] << left[2] << endl;  
 // // cout << right[0] << right[1] << right[2] << endl;  
 //     tensor_binary_operation(chunksize, left, right, resultp, plus<double>());  
 // cout << "left=" << left << " right=" << right << " resp=" << resultp << endl;  
 //     left+=leftStep;  
 //     right+=rightStep;  
 // cout << "left=" << left << " right=" << right << endl;  
 // // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl;  
 //  }  
 //  break;  
 // // need to fill in the rest  
 //     default:  
 //  throw DataException("Programmer error - resolveBinay can not resolve operator "+opToString(m_op)+".");  
 //   }  
 // // cout << "About to return "  << result[0] << result[1] << result[2] << endl;;  
 //   return result;  
 // }  
   
 // // the vector and the offset are a place where the method could write its data if it wishes  
 // // it is not obligated to do so. For example, if it has its own storage already, it can use that.  
 // // Hence the return value to indicate where the data is actually stored.  
 // // Regardless, the storage should be assumed to be used, even if it isn't.  
 // const double*  
 // DataLazy::resolveSample(ValueType& v,int sampleNo,  size_t offset )  
 // {  
 // cout << "Resolve sample " << toString() << endl;  
 //  // collapse so we have a 'E' node or an IDENTITY for some other type  
 //   if (m_readytype!='E' && m_op!=IDENTITY)  
 //   {  
 //  collapse();  
 //   }  
 //   if (m_op==IDENTITY)      
 //   {  
 //     const ValueType& vec=m_id->getVector();  
 //     if (m_readytype=='C')  
 //     {  
 //  return &(vec[0]);  
 //     }  
 //     return &(vec[m_id->getPointOffset(sampleNo, 0)]);  
 //   }  
 //   if (m_readytype!='E')  
 //   {  
 //     throw DataException("Programmer Error - Collapse did not produce an expanded node.");  
 //   }  
 //   switch (getOpgroup(m_op))  
 //   {  
 //   case G_UNARY: return resolveUnary(v,sampleNo,offset);  
 //   case G_BINARY: return resolveBinary(v,sampleNo,offset);  
 //   default:  
 //     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");  
 //   }  
 // }  
   
   
705    
706      The return value will be an existing vector so do not deallocate it.
707    */
708  // the vector and the offset are a place where the method could write its data if it wishes  // the vector and the offset are a place where the method could write its data if it wishes
709  // it is not obligated to do so. For example, if it has its own storage already, it can use that.  // it is not obligated to do so. For example, if it has its own storage already, it can use that.
710  // Hence the return value to indicate where the data is actually stored.  // Hence the return value to indicate where the data is actually stored.
# Line 830  cout << "Resolve sample " << toString() Line 745  cout << "Resolve sample " << toString()
745  }  }
746    
747    
748    // To simplify the memory management, all threads operate on one large vector, rather than one each.
749    // Each sample is evaluated independently and copied into the result DataExpanded.
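
As a standalone illustration of the layout this comment describes (independent of the escript types, with the per-sample evaluation left as a comment), each OpenMP thread works in its own contiguous slice of one shared scratch vector, so no locking is needed:

#include <algorithm>
#include <cstddef>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif

// Each thread only touches v[bufstart .. bufstart+threadbuffersize), where bufstart is
// derived from its thread number. resultvec must already hold totalsamples*samplesize
// doubles; a flat per-sample layout is assumed here in place of getPointOffset.
void resolveAllSamplesSketch(size_t totalsamples, size_t samplesize, int buffsRequired,
                             std::vector<double>& resultvec)
{
    size_t threadbuffersize = samplesize * std::max(1, buffsRequired);
    int numthreads = 1;
#ifdef _OPENMP
    numthreads = omp_get_max_threads();
#endif
    std::vector<double> v(numthreads * threadbuffersize);

    #pragma omp parallel for schedule(static)
    for (long sample = 0; sample < (long)totalsamples; ++sample)
    {
        size_t bufstart = 0;
#ifdef _OPENMP
        bufstart = threadbuffersize * omp_get_thread_num();
#endif
        // ... evaluate the expression for this sample into v starting at bufstart,
        //     exactly as resolveSample does, then copy the sample into the output ...
        for (size_t i = 0; i < samplesize; ++i)
        {
            resultvec[sample * samplesize + i] = v[bufstart + i];
        }
    }
}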
 // This version uses double* trying again with vectors  
 // DataReady_ptr  
 // DataLazy::resolve()  
 // {  
 //  
 // cout << "Sample size=" << m_samplesize << endl;  
 // cout << "Buffers=" << m_buffsRequired << endl;  
 //  
 //   if (m_readytype!='E')  
 //   {  
 //     collapse();  
 //   }  
 //   if (m_op==IDENTITY)  
 //   {  
 //     return m_id;  
 //   }  
 //      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'  
 //   size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);  
 //   int numthreads=1;  
 // #ifdef _OPENMP  
 //   numthreads=getNumberOfThreads();  
 //   int threadnum=0;  
 // #endif  
 //   ValueType v(numthreads*threadbuffersize);    
 // cout << "Buffer created with size=" << v.size() << endl;  
 //   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));  
 //   ValueType& resvec=result->getVector();  
 //   DataReady_ptr resptr=DataReady_ptr(result);  
 //   int sample;  
 //   int resoffset;  
 //   int totalsamples=getNumSamples();  
 //   const double* res=0;  
 //   #pragma omp parallel for private(sample,resoffset,threadnum,res) schedule(static)  
 //   for (sample=0;sample<totalsamples;++sample)  
 //   {  
 // cout << "################################# " << sample << endl;  
 // #ifdef _OPENMP  
 //     res=resolveSample(v,sample,threadbuffersize*omp_get_thread_num());  
 // #else  
 //     res=resolveSample(v,sample,0);   // this would normally be v, but not if its a single IDENTITY op.  
 // #endif  
 // cerr << "-------------------------------- " << endl;  
 //     resoffset=result->getPointOffset(sample,0);  
 // cerr << "offset=" << resoffset << endl;  
 //     for (unsigned int i=0;i<m_samplesize;++i,++resoffset)    // copy values into the output vector  
 //     {  
 //  resvec[resoffset]=res[i];  
 //     }  
 // cerr << "*********************************" << endl;  
 //   }  
 //   return resptr;  
 // }  
   
   
750  DataReady_ptr  DataReady_ptr
751  DataLazy::resolve()  DataLazy::resolve()
752  {  {
# Line 893  DataLazy::resolve() Line 754  DataLazy::resolve()
754  cout << "Sample size=" << m_samplesize << endl;  cout << "Sample size=" << m_samplesize << endl;
755  cout << "Buffers=" << m_buffsRequired << endl;  cout << "Buffers=" << m_buffsRequired << endl;
756    
757    if (m_readytype!='E')    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally
758    {    {
759      collapse();      collapse();
760    }    }
761    if (m_op==IDENTITY)    if (m_op==IDENTITY)       // So a lazy expression of Constant or Tagged data will be returned here.
762    {    {
763      return m_id;      return m_id;
764    }    }
765      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
766    size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);    size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired));    // Each thread needs to have enough
767        // storage to evaluate its expression
768    int numthreads=1;    int numthreads=1;
769  #ifdef _OPENMP  #ifdef _OPENMP
770    numthreads=getNumberOfThreads();    numthreads=getNumberOfThreads();
# Line 916  cout << "Buffer created with size=" << v Line 778  cout << "Buffer created with size=" << v
778    int sample;    int sample;
779    size_t outoffset;     // offset in the output data    size_t outoffset;     // offset in the output data
780    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
781    const ValueType* res=0;    const ValueType* res=0;   // Vector storing the answer
782    size_t resoffset=0;    size_t resoffset=0;       // where in the vector to find the answer
783    #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)    #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)
784    for (sample=0;sample<totalsamples;++sample)    for (sample=0;sample<totalsamples;++sample)
785    {    {
# Line 925  cout << "############################### Line 787  cout << "###############################
787  #ifdef _OPENMP  #ifdef _OPENMP
788      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
789  #else  #else
790      res=resolveSample(v,0,sample,resoffset);   // this would normally be v, but not if its a single IDENTITY op.      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.
791  #endif  #endif
792  cerr << "-------------------------------- " << endl;  cerr << "-------------------------------- " << endl;
793      outoffset=result->getPointOffset(sample,0);      outoffset=result->getPointOffset(sample,0);
# Line 948  DataLazy::toString() const Line 810  DataLazy::toString() const
810    return oss.str();    return oss.str();
811  }  }
812    
813    
814  void  void
815  DataLazy::intoString(ostringstream& oss) const  DataLazy::intoString(ostringstream& oss) const
816  {  {
# Line 989  DataLazy::intoString(ostringstream& oss) Line 852  DataLazy::intoString(ostringstream& oss)
852    }    }
853  }  }
854    
 // Note that in this case, deepCopy does not make copies of the leaves.  
 // Hopefully copy on write (or whatever we end up using) will take care of this.  
855  DataAbstract*  DataAbstract*
856  DataLazy::deepCopy()  DataLazy::deepCopy()
857  {  {
858    if (m_op==IDENTITY)    switch (getOpgroup(m_op))
859    {    {
860      return new DataLazy(m_left);    // we don't need to copy the child here    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());
861      case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
862      case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
863      default:
864        throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
865    }    }
   return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);  
866  }  }
867    
868    
# Line 1015  DataLazy::getSlice(const DataTypes::Regi Line 879  DataLazy::getSlice(const DataTypes::Regi
879    throw DataException("getSlice - not implemented for Lazy objects.");    throw DataException("getSlice - not implemented for Lazy objects.");
880  }  }
881    
882    
883    // To do this we need to rely on our child nodes
884    DataTypes::ValueType::size_type
885    DataLazy::getPointOffset(int sampleNo,
886                     int dataPointNo)
887    {
888      if (m_op==IDENTITY)
889      {
890        return m_id->getPointOffset(sampleNo,dataPointNo);
891      }
892      if (m_readytype!='E')
893      {
894        collapse();
895        return m_id->getPointOffset(sampleNo,dataPointNo);
896      }
897      // at this point we do not have an identity node and the expression will be Expanded
898      // so we only need to know which child to ask
899      if (m_left->m_readytype=='E')
900      {
901        return m_left->getPointOffset(sampleNo,dataPointNo);
902      }
903      else
904      {
905        return m_right->getPointOffset(sampleNo,dataPointNo);
906      }
907    }
908    
909    // To do this we need to rely on our child nodes
910  DataTypes::ValueType::size_type  DataTypes::ValueType::size_type
911  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
912                   int dataPointNo) const                   int dataPointNo) const
913  {  {
914    throw DataException("getPointOffset - not implemented for Lazy objects - yet.");    if (m_op==IDENTITY)
915      {
916        return m_id->getPointOffset(sampleNo,dataPointNo);
917      }
918      if (m_readytype=='E')
919      {
920        // at this point we do not have an identity node and the expression will be Expanded
921        // so we only need to know which child to ask
922        if (m_left->m_readytype=='E')
923        {
924        return m_left->getPointOffset(sampleNo,dataPointNo);
925        }
926        else
927        {
928        return m_right->getPointOffset(sampleNo,dataPointNo);
929        }
930      }
931      if (m_readytype=='C')
932      {
933        return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
934      }
935      throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
936    }
937    
938    // It would seem that DataTagged will need to be treated differently since even after setting all tags
939    // to zero, all the tags from all the DataTags would be in the result.
940    // However since they all have the same value (0) whether they are there or not should not matter.
941    // So I have decided that for all types this method will create a constant 0.
942    // It can be promoted up as required.
943    // A possible efficiency concern might be expanded->constant->expanded, which incurs extra memory management,
944    // but we can deal with that if it arises.
945    void
946    DataLazy::setToZero()
947    {
948      DataTypes::ValueType v(getNoValues(),0);
949      m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
950      m_op=IDENTITY;
951      m_right.reset();  
952      m_left.reset();
953      m_readytype='C';
954      m_buffsRequired=1;
955  }  }
956    
957  }   // end namespace  }   // end namespace
