/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


branches/schroedinger/escript/src/DataLazy.cpp revision 1910 by jfenwick, Thu Oct 23 03:05:28 2008 UTC
trunk/escript/src/DataLazy.cpp revision 2084 by jfenwick, Fri Nov 21 05:20:42 2008 UTC
# Line 26  Line 26 
26  #include "DataTypes.h"  #include "DataTypes.h"
27  #include "Data.h"  #include "Data.h"
28  #include "UnaryFuncs.h"     // for escript::fsign  #include "UnaryFuncs.h"     // for escript::fsign
29    #include "Utils.h"
30    
31  /*  /*
32  How does DataLazy work?  How does DataLazy work?
# Line 47  I will refer to individual DataLazy obje Line 48  I will refer to individual DataLazy obje
48  Each node also stores:  Each node also stores:
49  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
50      evaluated.      evaluated.
 - m_length ~ how many values would be stored in the answer if the expression was evaluated.  
51  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
52      evaluate the expression.      evaluate the expression.
53  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
# Line 69  The convention that I use, is that the r Line 69  The convention that I use, is that the r
69    
70  For expressions which evaluate to Constant or Tagged, there is a different evaluation method.  For expressions which evaluate to Constant or Tagged, there is a different evaluation method.
71  The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.  The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.
72    
73    To add a new operator you need to do the following (plus anything I might have forgotten):
74    1) Add to the ES_optype.
75    2) determine what opgroup your operation belongs to (X)
76    3) add a string for the op to the end of ES_opstrings
77    4) increase ES_opcount
78    5) add an entry (X) to opgroups
79    6) add an entry to the switch in collapseToReady
80    7) add an entry to resolveX
81  */  */
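The checklist above spans several parallel tables (the ES_optype enum, ES_opstrings, ES_opcount, opgroups) plus the evaluation switches, and they all have to stay in step. A self-contained sketch of that table-driven layout, using invented names rather than escript's actual declarations, may make the individual steps easier to picture:

    #include <cmath>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // step 1: the operator enum (a new op gets a value before OPCOUNT)
    enum Optype {UNKNOWN, IDENTITY, ADD, SIN, OPCOUNT};
    // step 2: the groups an op can belong to
    enum Opgroup {G_UNKNOWN, G_IDENTITY, G_BINARY, G_UNARY};
    // steps 3 and 4: the name table and its length grow together
    const std::string opstrings[OPCOUNT]={"UNKNOWN","IDENTITY","+","sin"};
    // step 5: the group table is indexed by the same enum
    const Opgroup opgroups[OPCOUNT]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_UNARY};

    // steps 6 and 7: every evaluation switch needs a case for the new op
    double evaluate(Optype op, double l, double r)
    {
      switch (op)
      {
        case ADD: return l+r;
        case SIN: return std::sin(l);
        default:  throw std::runtime_error("unknown operator "+opstrings[op]);
      }
    }

    int main()
    {
      std::cout << opstrings[SIN] << " belongs to group " << opgroups[SIN]
                << ", sin(0)=" << evaluate(SIN,0.0,0.0) << std::endl;
      return 0;
    }

Because the tables are indexed by the same enum, forgetting one entry silently misaligns every later operator, which is presumably why the comment insists on the full list.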
82    
83    
# Line 78  using namespace boost; Line 87  using namespace boost;
87  namespace escript  namespace escript
88  {  {
89    
 const std::string&  
 opToString(ES_optype op);  
   
90  namespace  namespace
91  {  {
92    
# Line 89  enum ES_opgroup Line 95  enum ES_opgroup
95     G_UNKNOWN,     G_UNKNOWN,
96     G_IDENTITY,     G_IDENTITY,
97     G_BINARY,        // pointwise operations with two arguments     G_BINARY,        // pointwise operations with two arguments
98     G_UNARY      // pointwise operations with one argument     G_UNARY,     // pointwise operations with one argument
99       G_NP1OUT,        // non-pointwise op with one output
100       G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter
101       G_TENSORPROD     // general tensor product
102  };  };
103    
104    
# Line 100  string ES_opstrings[]={"UNKNOWN","IDENTI Line 109  string ES_opstrings[]={"UNKNOWN","IDENTI
109              "asin","acos","atan","sinh","cosh","tanh","erf",              "asin","acos","atan","sinh","cosh","tanh","erf",
110              "asinh","acosh","atanh",              "asinh","acosh","atanh",
111              "log10","log","sign","abs","neg","pos","exp","sqrt",              "log10","log","sign","abs","neg","pos","exp","sqrt",
112              "1/","where>0","where<0","where>=0","where<=0"};              "1/","where>0","where<0","where>=0","where<=0",
113  int ES_opcount=32;              "symmetric","nonsymmetric",
114                "prod",
115                "transpose",
116                "trace"};
117    int ES_opcount=38;
118  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
119              G_UNARY,G_UNARY,G_UNARY, //10              G_UNARY,G_UNARY,G_UNARY, //10
120              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17
121              G_UNARY,G_UNARY,G_UNARY,                    // 20              G_UNARY,G_UNARY,G_UNARY,                    // 20
122              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28
123              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY};              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,            // 33
124                G_NP1OUT,G_NP1OUT,
125                G_TENSORPROD,
126                G_NP1OUT_P, G_NP1OUT_P};
127  inline  inline
128  ES_opgroup  ES_opgroup
129  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 124  resultFS(DataAbstract_ptr left, DataAbst Line 140  resultFS(DataAbstract_ptr left, DataAbst
140      // that way, if interpolate is required in any other op we can just throw a      // that way, if interpolate is required in any other op we can just throw a
141      // programming error exception.      // programming error exception.
142    
143      FunctionSpace l=left->getFunctionSpace();
144      if (left->getFunctionSpace()!=right->getFunctionSpace())    FunctionSpace r=right->getFunctionSpace();
145      {    if (l!=r)
146          throw DataException("FunctionSpaces not equal - interpolation not supported on lazy data.");    {
147      }      if (r.probeInterpolation(l))
148      return left->getFunctionSpace();      {
149        return l;
150        }
151        if (l.probeInterpolation(r))
152        {
153        return r;
154        }
155        throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
156      }
157      return l;
158  }  }
159    
160  // return the shape of the result of "left op right"  // return the shape of the result of "left op right"
161    // the shapes resulting from tensor product are more complex to compute so are worked out elsewhere
162  DataTypes::ShapeType  DataTypes::ShapeType
163  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
164  {  {
165      if (left->getShape()!=right->getShape())      if (left->getShape()!=right->getShape())
166      {      {
167        if (getOpgroup(op)!=G_BINARY)        if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
168        {        {
169          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");          throw DataException("Shapes not the same - shapes must match for (point)binary operations.");
170        }        }
# Line 155  resultShape(DataAbstract_ptr left, DataA Line 181  resultShape(DataAbstract_ptr left, DataA
181      return left->getShape();      return left->getShape();
182  }  }
183    
184  // determine the number of points in the result of "left op right"  // return the shape for "op left"
185  size_t  
186  resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataTypes::ShapeType
187    resultShape(DataAbstract_ptr left, ES_optype op)
188  {  {
189     switch (getOpgroup(op))      switch(op)
190     {      {
191     case G_BINARY: return left->getLength();          case TRANS:
192     case G_UNARY: return left->getLength();          return left->getShape();
193     default:      break;
194      throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");      case TRACE:
195     }          return DataTypes::scalarShape;
196        break;
197            default:
198    cout << op << endl;
199        throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
200        }
201  }  }
202    
203    // determine the output shape for the general tensor product operation
204    // the additional parameters return information required later for the product
205    // the majority of this code is copy-pasted from C_General_Tensor_Product
206    DataTypes::ShapeType
207    GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
208    {
209        
210      // Get rank and shape of inputs
211      int rank0 = left->getRank();
212      int rank1 = right->getRank();
213      const DataTypes::ShapeType& shape0 = left->getShape();
214      const DataTypes::ShapeType& shape1 = right->getShape();
215    
216      // Prepare for the loops of the product and verify compatibility of shapes
217      int start0=0, start1=0;
218      if (transpose == 0)       {}
219      else if (transpose == 1)  { start0 = axis_offset; }
220      else if (transpose == 2)  { start1 = rank1-axis_offset; }
221      else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
222    
223    
224      // Adjust the shapes for transpose
225      DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather
226      DataTypes::ShapeType tmpShape1(rank1);    // than using push_back
227      for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
228      for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }
229    
230      // Prepare for the loops of the product
231      SL=1, SM=1, SR=1;
232      for (int i=0; i<rank0-axis_offset; i++)   {
233        SL *= tmpShape0[i];
234      }
235      for (int i=rank0-axis_offset; i<rank0; i++)   {
236        if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
237          throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
238        }
239        SM *= tmpShape0[i];
240      }
241      for (int i=axis_offset; i<rank1; i++)     {
242        SR *= tmpShape1[i];
243      }
244    
245      // Define the shape of the output (rank of shape is the sum of the loop ranges below)
246      DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);  
247      {         // block to limit the scope of out_index
248         int out_index=0;
249         for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
250         for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
251      }
252      return shape2;
253    }
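For the common transpose==0 case the SL/SM/SR bookkeeping above reduces to an ordinary matrix product. The standalone worked example below (plain std::vector shapes, escript's transpose handling omitted, numbers chosen for illustration) contracts a (3,4) tensor with a (4,5) tensor over one shared axis, giving SL=3, SM=4, SR=5 and an output shape of (3,5):

    #include <iostream>
    #include <stdexcept>
    #include <vector>

    typedef std::vector<int> Shape;

    Shape gtpShape(const Shape& shape0, const Shape& shape1, int axis_offset,
                   int& SL, int& SM, int& SR)
    {
      int rank0=(int)shape0.size(), rank1=(int)shape1.size();
      SL=1; SM=1; SR=1;
      for (int i=0; i<rank0-axis_offset; ++i) { SL*=shape0[i]; }     // free indices of the left
      for (int i=rank0-axis_offset; i<rank0; ++i)                    // shared (summed) indices
      {
        if (shape0[i]!=shape1[i-(rank0-axis_offset)])
          throw std::runtime_error("incompatible shapes");
        SM*=shape0[i];
      }
      for (int i=axis_offset; i<rank1; ++i) { SR*=shape1[i]; }       // free indices of the right

      Shape out;                                                     // rank = rank0+rank1-2*axis_offset
      for (int i=0; i<rank0-axis_offset; ++i) { out.push_back(shape0[i]); }
      for (int i=axis_offset; i<rank1; ++i)   { out.push_back(shape1[i]); }
      return out;
    }

    int main()
    {
      int SL,SM,SR;
      Shape s=gtpShape(Shape{3,4}, Shape{4,5}, 1, SL, SM, SR);
      std::cout << "SL=" << SL << " SM=" << SM << " SR=" << SR
                << " output shape=(" << s[0] << "," << s[1] << ")" << std::endl;  // SL=3 SM=4 SR=5, (3,5)
      return 0;
    }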
254    
255    
256    // determine the number of points in the result of "left op right"
257    // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here
258    // size_t
259    // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
260    // {
261    //    switch (getOpgroup(op))
262    //    {
263    //    case G_BINARY: return left->getLength();
264    //    case G_UNARY: return left->getLength();
265    //    case G_NP1OUT: return left->getLength();
266    //    default:
267    //  throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");
268    //    }
269    // }
270    
271  // determine the number of samples required to evaluate an expression combining left and right  // determine the number of samples required to evaluate an expression combining left and right
272    // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.
273    // The same goes for G_TENSORPROD
274  int  int
275  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
276  {  {
# Line 177  calcBuffs(const DataLazy_ptr& left, cons Line 279  calcBuffs(const DataLazy_ptr& left, cons
279     case G_IDENTITY: return 1;     case G_IDENTITY: return 1;
280     case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);     case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);
281     case G_UNARY: return max(left->getBuffsRequired(),1);     case G_UNARY: return max(left->getBuffsRequired(),1);
282       case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);
283       case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);
284       case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
285     default:     default:
286      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
287     }     }
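A worked example of the recurrence above, written with invented helper names: a leaf (IDENTITY) needs one buffer, a unary node needs as many as its child, and a binary node needs one more than its right child, taking the maximum across both sides. For sin(a)+b over two ready leaves that comes to two buffers:

    #include <algorithm>
    #include <iostream>

    int calcBuffsLeaf()                        { return 1; }
    int calcBuffsUnary(int left)               { return std::max(left,1); }
    int calcBuffsBinary(int left, int right)   { return std::max(left,right+1); }

    int main()
    {
      int a=calcBuffsLeaf();                   // identity node wrapping ready data -> 1
      int b=calcBuffsLeaf();                   // identity node wrapping ready data -> 1
      int sin_a=calcBuffsUnary(a);             // sin(a)   -> max(1,1) = 1
      int total=calcBuffsBinary(sin_a,b);      // sin(a)+b -> max(1,1+1) = 2
      std::cout << "buffers required: " << total << std::endl;
      return 0;
    }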
# Line 201  opToString(ES_optype op) Line 306  opToString(ES_optype op)
306    
307  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
308      : parent(p->getFunctionSpace(),p->getShape()),      : parent(p->getFunctionSpace(),p->getShape()),
309      m_op(IDENTITY)      m_op(IDENTITY),
310        m_axis_offset(0),
311        m_transpose(0),
312        m_SL(0), m_SM(0), m_SR(0)
313  {  {
314     if (p->isLazy())     if (p->isLazy())
315     {     {
# Line 218  DataLazy::DataLazy(DataAbstract_ptr p) Line 326  DataLazy::DataLazy(DataAbstract_ptr p)
326      else if (p->isTagged()) {m_readytype='T';}      else if (p->isTagged()) {m_readytype='T';}
327      else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}      else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}
328     }     }
    m_length=p->getLength();  
329     m_buffsRequired=1;     m_buffsRequired=1;
330     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
331       m_maxsamplesize=m_samplesize;
332  cout << "(1)Lazy created with " << m_samplesize << endl;  cout << "(1)Lazy created with " << m_samplesize << endl;
333  }  }
334    
# Line 229  cout << "(1)Lazy created with " << m_sam Line 337  cout << "(1)Lazy created with " << m_sam
337    
338  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
339      : parent(left->getFunctionSpace(),left->getShape()),      : parent(left->getFunctionSpace(),left->getShape()),
340      m_op(op)      m_op(op),
341        m_axis_offset(0),
342        m_transpose(0),
343        m_SL(0), m_SM(0), m_SR(0)
344  {  {
345     if (getOpgroup(op)!=G_UNARY)     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))
346     {     {
347      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
348     }     }
349    
350     DataLazy_ptr lleft;     DataLazy_ptr lleft;
351     if (!left->isLazy())     if (!left->isLazy())
352     {     {
# Line 245  DataLazy::DataLazy(DataAbstract_ptr left Line 357  DataLazy::DataLazy(DataAbstract_ptr left
357      lleft=dynamic_pointer_cast<DataLazy>(left);      lleft=dynamic_pointer_cast<DataLazy>(left);
358     }     }
359     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
    m_length=left->getLength();  
360     m_left=lleft;     m_left=lleft;
361     m_buffsRequired=1;     m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
362     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
363       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
364  }  }
365    
366    
367    // In this constructor we need to consider interpolation
368  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
369      : parent(resultFS(left,right,op), resultShape(left,right,op)),      : parent(resultFS(left,right,op), resultShape(left,right,op)),
370      m_op(op)      m_op(op),
371        m_SL(0), m_SM(0), m_SR(0)
372  {  {
373     if (getOpgroup(op)!=G_BINARY)     if ((getOpgroup(op)!=G_BINARY))
374     {     {
375      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
376     }     }
377    
378       if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
379       {
380        FunctionSpace fs=getFunctionSpace();
381        Data ltemp(left);
382        Data tmp(ltemp,fs);
383        left=tmp.borrowDataPtr();
384       }
385       if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
386       {
387        Data tmp(Data(right),getFunctionSpace());
388        right=tmp.borrowDataPtr();
389       }
390       left->operandCheck(*right);
391    
392     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required     if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required
393     {     {
394      m_left=dynamic_pointer_cast<DataLazy>(left);      m_left=dynamic_pointer_cast<DataLazy>(left);
# Line 290  DataLazy::DataLazy(DataAbstract_ptr left Line 419  DataLazy::DataLazy(DataAbstract_ptr left
419     {     {
420      m_readytype='C';      m_readytype='C';
421     }     }
422     m_length=resultLength(m_left,m_right,m_op);     m_samplesize=getNumDPPSample()*getNoValues();
423     m_samplesize=getNumDPPSample()*getNoValues();         m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());  
424     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_buffsRequired=calcBuffs(m_left, m_right,m_op);
425  cout << "(3)Lazy created with " << m_samplesize << endl;  cout << "(3)Lazy created with " << m_samplesize << endl;
426  }  }
427    
428    DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
429        : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
430        m_op(op),
431        m_axis_offset(axis_offset),
432        m_transpose(transpose)
433    {
434       if ((getOpgroup(op)!=G_TENSORPROD))
435       {
436        throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
437       }
438       if ((transpose>2) || (transpose<0))
439       {
440        throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
441       }
442       if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
443       {
444        FunctionSpace fs=getFunctionSpace();
445        Data ltemp(left);
446        Data tmp(ltemp,fs);
447        left=tmp.borrowDataPtr();
448       }
449       if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
450       {
451        Data tmp(Data(right),getFunctionSpace());
452        right=tmp.borrowDataPtr();
453       }
454       left->operandCheck(*right);
455    
456       if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required
457       {
458        m_left=dynamic_pointer_cast<DataLazy>(left);
459       }
460       else
461       {
462        m_left=DataLazy_ptr(new DataLazy(left));
463       }
464       if (right->isLazy())
465       {
466        m_right=dynamic_pointer_cast<DataLazy>(right);
467       }
468       else
469       {
470        m_right=DataLazy_ptr(new DataLazy(right));
471       }
472       char lt=m_left->m_readytype;
473       char rt=m_right->m_readytype;
474       if (lt=='E' || rt=='E')
475       {
476        m_readytype='E';
477       }
478       else if (lt=='T' || rt=='T')
479       {
480        m_readytype='T';
481       }
482       else
483       {
484        m_readytype='C';
485       }
486       m_samplesize=getNumDPPSample()*getNoValues();
487       m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());  
488       m_buffsRequired=calcBuffs(m_left, m_right,m_op);
489    cout << "(4)Lazy created with " << m_samplesize << endl;
490    }
491    
492    
493    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
494        : parent(left->getFunctionSpace(), resultShape(left,op)),
495        m_op(op),
496        m_axis_offset(axis_offset),
497        m_transpose(0)
498    {
499       if ((getOpgroup(op)!=G_NP1OUT_P))
500       {
501        throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
502       }
503       DataLazy_ptr lleft;
504       if (!left->isLazy())
505       {
506        lleft=DataLazy_ptr(new DataLazy(left));
507       }
508       else
509       {
510        lleft=dynamic_pointer_cast<DataLazy>(left);
511       }
512       m_readytype=lleft->m_readytype;
513       m_left=lleft;
514       m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
515       m_samplesize=getNumDPPSample()*getNoValues();
516       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
517    cout << "(5)Lazy created with " << m_samplesize << endl;
518    }
519    
520    
521  DataLazy::~DataLazy()  DataLazy::~DataLazy()
522  {  {
# Line 309  DataLazy::getBuffsRequired() const Line 530  DataLazy::getBuffsRequired() const
530  }  }
531    
532    
533    size_t
534    DataLazy::getMaxSampleSize() const
535    {
536        return m_maxsamplesize;
537    }
538    
539  /*  /*
540    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
541    This does the work for the collapse method.    This does the work for the collapse method.
# Line 328  DataLazy::collapseToReady() Line 555  DataLazy::collapseToReady()
555    DataReady_ptr pleft=m_left->collapseToReady();    DataReady_ptr pleft=m_left->collapseToReady();
556    Data left(pleft);    Data left(pleft);
557    Data right;    Data right;
558    if (getOpgroup(m_op)==G_BINARY)    if ((getOpgroup(m_op)==G_BINARY) || (getOpgroup(m_op)==G_TENSORPROD))
559    {    {
560      right=Data(m_right->collapseToReady());      right=Data(m_right->collapseToReady());
561    }    }
# Line 427  DataLazy::collapseToReady() Line 654  DataLazy::collapseToReady()
654      case LEZ:      case LEZ:
655      result=left.whereNonPositive();      result=left.whereNonPositive();
656      break;      break;
657        case SYM:
658        result=left.symmetric();
659        break;
660        case NSYM:
661        result=left.nonsymmetric();
662        break;
663        case PROD:
664        result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
665        break;
666        case TRANS:
667        result=left.transpose(m_axis_offset);
668        break;
669        case TRACE:
670        result=left.trace(m_axis_offset);
671        break;
672      default:      default:
673      throw DataException("Programmer error - do not know how to resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
674    }    }
675    return result.borrowReadyPtr();    return result.borrowReadyPtr();
676  }  }
# Line 455  DataLazy::collapse() Line 697  DataLazy::collapse()
697  }  }
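collapse() and collapseToReady() walk the expression tree bottom-up: evaluate the left child, evaluate the right child only for operations that have one, then apply the operation eagerly through the Data interface. The toy model below (invented types, nothing escript-specific) shows the same recursion for a sin(a+b) tree:

    #include <cmath>
    #include <iostream>
    #include <memory>
    #include <stdexcept>

    struct Node
    {
      enum Op {LEAF, ADD, SIN} op;
      double value;                              // only used by LEAF
      std::shared_ptr<Node> left, right;

      double collapse() const                    // analogue of collapseToReady()
      {
        if (op==LEAF) { return value; }
        double l=left->collapse();
        double r=(op==ADD) ? right->collapse() : 0;   // only binary ops need the right child
        switch (op)
        {
          case ADD: return l+r;
          case SIN: return std::sin(l);
          default:  throw std::logic_error("unknown op");
        }
      }
    };

    int main()
    {
      auto a=std::make_shared<Node>(Node{Node::LEAF,2.0,nullptr,nullptr});
      auto b=std::make_shared<Node>(Node{Node::LEAF,3.0,nullptr,nullptr});
      auto sum=std::make_shared<Node>(Node{Node::ADD,0,a,b});
      Node root{Node::SIN,0,sum,nullptr};
      std::cout << root.collapse() << std::endl;      // prints sin(2+3)
      return 0;
    }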
698    
699  /*  /*
700    \brief Compute the value of the expression (binary operation) for the given sample.    \brief Compute the value of the expression (unary operation) for the given sample.
701    \return Vector which stores the value of the subexpression for the given sample.    \return Vector which stores the value of the subexpression for the given sample.
702    \param v A vector to store intermediate results.    \param v A vector to store intermediate results.
703    \param offset Index in v to begin storing results.    \param offset Index in v to begin storing results.
# Line 483  DataLazy::resolveUnary(ValueType& v, siz Line 725  DataLazy::resolveUnary(ValueType& v, siz
725    switch (m_op)    switch (m_op)
726    {    {
727      case SIN:        case SIN:  
728      tensor_unary_operation(m_samplesize, left, result, ::sin);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);
729      break;      break;
730      case COS:      case COS:
731      tensor_unary_operation(m_samplesize, left, result, ::cos);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);
732      break;      break;
733      case TAN:      case TAN:
734      tensor_unary_operation(m_samplesize, left, result, ::tan);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);
735      break;      break;
736      case ASIN:      case ASIN:
737      tensor_unary_operation(m_samplesize, left, result, ::asin);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);
738      break;      break;
739      case ACOS:      case ACOS:
740      tensor_unary_operation(m_samplesize, left, result, ::acos);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);
741      break;      break;
742      case ATAN:      case ATAN:
743      tensor_unary_operation(m_samplesize, left, result, ::atan);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);
744      break;      break;
745      case SINH:      case SINH:
746      tensor_unary_operation(m_samplesize, left, result, ::sinh);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);
747      break;      break;
748      case COSH:      case COSH:
749      tensor_unary_operation(m_samplesize, left, result, ::cosh);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);
750      break;      break;
751      case TANH:      case TANH:
752      tensor_unary_operation(m_samplesize, left, result, ::tanh);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);
753      break;      break;
754      case ERF:      case ERF:
755  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
756      throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");      throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
757  #else  #else
758      tensor_unary_operation(m_samplesize, left, result, ::erf);      tensor_unary_operation(m_samplesize, left, result, ::erf);
759      break;      break;
760  #endif  #endif
761     case ASINH:     case ASINH:
762  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
763      tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);      tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
764  #else  #else
765      tensor_unary_operation(m_samplesize, left, result, ::asinh);      tensor_unary_operation(m_samplesize, left, result, ::asinh);
766  #endif    #endif  
767      break;      break;
768     case ACOSH:     case ACOSH:
769  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
770      tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);      tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
771  #else  #else
772      tensor_unary_operation(m_samplesize, left, result, ::acosh);      tensor_unary_operation(m_samplesize, left, result, ::acosh);
773  #endif    #endif  
774      break;      break;
775     case ATANH:     case ATANH:
776  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
777      tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);      tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
778  #else  #else
779      tensor_unary_operation(m_samplesize, left, result, ::atanh);      tensor_unary_operation(m_samplesize, left, result, ::atanh);
780  #endif    #endif  
781      break;      break;
782      case LOG10:      case LOG10:
783      tensor_unary_operation(m_samplesize, left, result, ::log10);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);
784      break;      break;
785      case LOG:      case LOG:
786      tensor_unary_operation(m_samplesize, left, result, ::log);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);
787      break;      break;
788      case SIGN:      case SIGN:
789      tensor_unary_operation(m_samplesize, left, result, escript::fsign);      tensor_unary_operation(m_samplesize, left, result, escript::fsign);
790      break;      break;
791      case ABS:      case ABS:
792      tensor_unary_operation(m_samplesize, left, result, ::fabs);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);
793      break;      break;
794      case NEG:      case NEG:
795      tensor_unary_operation(m_samplesize, left, result, negate<double>());      tensor_unary_operation(m_samplesize, left, result, negate<double>());
# Line 558  DataLazy::resolveUnary(ValueType& v, siz Line 800  DataLazy::resolveUnary(ValueType& v, siz
800      throw DataException("Programmer error - POS not supported for lazy data.");      throw DataException("Programmer error - POS not supported for lazy data.");
801      break;      break;
802      case EXP:      case EXP:
803      tensor_unary_operation(m_samplesize, left, result, ::exp);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);
804      break;      break;
805      case SQRT:      case SQRT:
806      tensor_unary_operation(m_samplesize, left, result, ::sqrt);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);
807      break;      break;
808      case RECIP:      case RECIP:
809      tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));      tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
# Line 586  DataLazy::resolveUnary(ValueType& v, siz Line 828  DataLazy::resolveUnary(ValueType& v, siz
828  }  }
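The calls above now spell out the template argument, e.g. tensor_unary_operation<double (*)(double)>(..., ::sin), presumably because the math headers make ::sin an overload set (float, double and long double versions), so deducing the functor type from the bare name can fail. The standalone sketch below reproduces the idiom with an invented apply() helper standing in for tensor_unary_operation:

    #include <math.h>       // ::sin in the global namespace
    #include <cstddef>
    #include <iostream>

    template <typename UnaryFunction>
    void apply(std::size_t n, const double* in, double* out, UnaryFunction f)
    {
      for (std::size_t i=0; i<n; ++i) { out[i]=f(in[i]); }
    }

    int main()
    {
      double in[3]={0.0, 1.0, 2.0};
      double out[3];
      // apply(3, in, out, ::sin);                    // may fail to deduce: ::sin is overloaded
      apply<double (*)(double)>(3, in, out, ::sin);   // explicit type picks double sin(double)
      std::cout << out[0] << ' ' << out[1] << ' ' << out[2] << std::endl;
      return 0;
    }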
829    
830    
831    /*
832      \brief Compute the value of the expression (unary operation) for the given sample.
833      \return Vector which stores the value of the subexpression for the given sample.
834      \param v A vector to store intermediate results.
835      \param offset Index in v to begin storing results.
836      \param sampleNo Sample number to evaluate.
837      \param roffset (output parameter) the offset in the return vector where the result begins.
838    
839      The return value will be an existing vector so do not deallocate it.
840      If the result is stored in v it should be stored at the offset given.
841      Everything from offset to the end of v should be considered available for this method to use.
842    */
843    DataTypes::ValueType*
844    DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
845    {
846        // we assume that any collapsing has been done before we get here
847        // since we only have one argument we don't need to think about only
848        // processing single points.
849      if (m_readytype!='E')
850      {
851        throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");
852      }
853        // since we can't write the result over the input, we need a result offset further along
854      size_t subroffset=roffset+m_samplesize;
855      const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);
856      roffset=offset;
857      switch (m_op)
858      {
859        case SYM:
860        DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
861        break;
862        case NSYM:
863        DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
864        break;
865        default:
866        throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
867      }
868      return &v;
869    }
870    
871    /*
872      \brief Compute the value of the expression (unary operation) for the given sample.
873      \return Vector which stores the value of the subexpression for the given sample.
874      \param v A vector to store intermediate results.
875      \param offset Index in v to begin storing results.
876      \param sampleNo Sample number to evaluate.
877      \param roffset (output parameter) the offset in the return vector where the result begins.
878    
879      The return value will be an existing vector so do not deallocate it.
880      If the result is stored in v it should be stored at the offset given.
881      Everything from offset to the end of v should be considered available for this method to use.
882    */
883    DataTypes::ValueType*
884    DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
885    {
886        // we assume that any collapsing has been done before we get here
887        // since we only have one argument we don't need to think about only
888        // processing single points.
889      if (m_readytype!='E')
890      {
891        throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");
892      }
893        // since we can't write the result over the input, we need a result offset further along
894      size_t subroffset=roffset+m_samplesize;
895      const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);
896      roffset=offset;
897      switch (m_op)
898      {
899        case TRACE:
900             DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
901        break;
902        case TRANS:
903             DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
904        break;
905        default:
906        throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
907      }
908      return &v;
909    }
910    
911    
912  #define PROC_OP(X) \  #define PROC_OP(TYPE,X)                               \
913      for (int i=0;i<steps;++i,resultp+=resultStep) \      for (int i=0;i<steps;++i,resultp+=resultStep) \
914      { \      { \
915         tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \         tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
916         lroffset+=leftStep; \         lroffset+=leftStep; \
917         rroffset+=rightStep; \         rroffset+=rightStep; \
918      }      }
# Line 627  cout << "Resolve binary: " << toString() Line 947  cout << "Resolve binary: " << toString()
947      // first work out which of the children are expanded      // first work out which of the children are expanded
948    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
949    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
950      if (!leftExp && !rightExp)
951      {
952        throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
953      }
954      bool leftScalar=(m_left->getRank()==0);
955      bool rightScalar=(m_right->getRank()==0);
956    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?
957    int steps=(bigloops?1:getNumDPPSample());    int steps=(bigloops?1:getNumDPPSample());
958    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint
959    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops
960    {    {
961      EsysAssert((m_left->getRank()==0) || (m_right->getRank()==0), "Error - Ranks must match unless one is 0.");      if (!leftScalar && !rightScalar)
962        {
963           throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
964        }
965      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());
966      chunksize=1;    // for scalar      chunksize=1;    // for scalar
967    }        }    
968    int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);    int leftStep=((leftExp && (!rightExp || rightScalar))? m_right->getNoValues() : 0);
969    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);    int rightStep=((rightExp && (!leftExp || leftScalar))? m_left->getNoValues() : 0);
970    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0
971      // Get the values of sub-expressions      // Get the values of sub-expressions
972    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
# Line 647  cout << "Resolve binary: " << toString() Line 976  cout << "Resolve binary: " << toString()
976    switch(m_op)    switch(m_op)
977    {    {
978      case ADD:      case ADD:
979      PROC_OP(plus<double>());          PROC_OP(NO_ARG,plus<double>());
980      break;      break;
981      case SUB:      case SUB:
982      PROC_OP(minus<double>());      PROC_OP(NO_ARG,minus<double>());
983      break;      break;
984      case MUL:      case MUL:
985      PROC_OP(multiplies<double>());      PROC_OP(NO_ARG,multiplies<double>());
986      break;      break;
987      case DIV:      case DIV:
988      PROC_OP(divides<double>());      PROC_OP(NO_ARG,divides<double>());
989      break;      break;
990      case POW:      case POW:
991      PROC_OP(::pow);         PROC_OP(double (double,double),::pow);
992      break;      break;
993      default:      default:
994      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
# Line 669  cout << "Resolve binary: " << toString() Line 998  cout << "Resolve binary: " << toString()
998  }  }
999    
1000    
1001    /*
1002      \brief Compute the value of the expression (tensor product) for the given sample.
1003      \return Vector which stores the value of the subexpression for the given sample.
1004      \param v A vector to store intermediate results.
1005      \param offset Index in v to begin storing results.
1006      \param sampleNo Sample number to evaluate.
1007      \param roffset (output parameter) the offset in the return vector where the result begins.
1008    
1009      The return value will be an existing vector so do not deallocate it.
1010      If the result is stored in v it should be stored at the offset given.
1011      Everything from offset to the end of v should be considered available for this method to use.
1012    */
1013    // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1014    // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1015    // unlike the other resolve helpers, we must treat these datapoints separately.
1016    DataTypes::ValueType*
1017    DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const
1018    {
1019    cout << "Resolve TensorProduct: " << toString() << endl;
1020    
1021      size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors
1022        // first work out which of the children are expanded
1023      bool leftExp=(m_left->m_readytype=='E');
1024      bool rightExp=(m_right->m_readytype=='E');
1025      int steps=getNumDPPSample();
1026      int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);
1027      int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);
1028      int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0
1029        // Get the values of sub-expressions (leave a gap of one sample for the result).
1030      const ValueType* left=m_left->resolveSample(v,offset+m_samplesize,sampleNo,lroffset);
1031      const ValueType* right=m_right->resolveSample(v,offset+2*m_samplesize,sampleNo,rroffset);
1032      double* resultp=&(v[offset]);     // results are stored at the vector offset we received
1033      switch(m_op)
1034      {
1035        case PROD:
1036        for (int i=0;i<steps;++i,resultp+=resultStep)
1037        {
1038              const double *ptr_0 = &((*left)[lroffset]);
1039              const double *ptr_1 = &((*right)[rroffset]);
1040              matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1041          lroffset+=leftStep;
1042          rroffset+=rightStep;
1043        }
1044        break;
1045        default:
1046        throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1047      }
1048      roffset=offset;
1049      return &v;
1050    }
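matrix_matrix_product itself is defined elsewhere in escript and is not part of this diff, so the following is only a guess at its semantics, written as a naive standalone reference: for transpose==0 it contracts an SL x SM block with an SM x SR block into an SL x SR block, consuming the SL/SM/SR values that GTPShape precomputed. A row-major layout is assumed here purely for illustration.

    #include <iostream>

    void naive_matrix_matrix_product(int SL, int SM, int SR,
                                     const double* A, const double* B, double* out)
    {
      for (int i=0; i<SL; ++i)
        for (int k=0; k<SR; ++k)
        {
          double sum=0;
          for (int j=0; j<SM; ++j) { sum+=A[i*SM+j]*B[j*SR+k]; }   // sum over the shared index
          out[i*SR+k]=sum;
        }
    }

    int main()
    {
      const double A[2*3]={1,2,3, 4,5,6};          // SL=2, SM=3 (row-major)
      const double B[3*2]={7,8, 9,10, 11,12};      // SM=3, SR=2
      double C[2*2];
      naive_matrix_matrix_product(2,3,2,A,B,C);
      std::cout << C[0] << ' ' << C[1] << '\n'
                << C[2] << ' ' << C[3] << std::endl;   // 58 64 / 139 154
      return 0;
    }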
1051    
1052    
1053    
1054  /*  /*
1055    \brief Compute the value of the expression for the given sample.    \brief Compute the value of the expression for the given sample.
# Line 714  cout << "Resolve sample " << toString() Line 1095  cout << "Resolve sample " << toString()
1095    {    {
1096    case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);    case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);
1097    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
1098      case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);
1099      case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);
1100      case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);
1101    default:    default:
1102      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1103    }    }
# Line 738  cout << "Buffers=" << m_buffsRequired << Line 1122  cout << "Buffers=" << m_buffsRequired <<
1122      return m_id;      return m_id;
1123    }    }
1124      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1125    size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired));    // Each thread needs to have enough    size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough
1126      // storage to evaluate its expression      // storage to evaluate its expression
1127    int numthreads=1;    int numthreads=1;
1128  #ifdef _OPENMP  #ifdef _OPENMP
1129    numthreads=getNumberOfThreads();    numthreads=getNumberOfThreads();
   int threadnum=0;  
1130  #endif  #endif
1131    ValueType v(numthreads*threadbuffersize);    ValueType v(numthreads*threadbuffersize);
1132  cout << "Buffer created with size=" << v.size() << endl;  cout << "Buffer created with size=" << v.size() << endl;
# Line 755  cout << "Buffer created with size=" << v Line 1138  cout << "Buffer created with size=" << v
1138    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1139    const ValueType* res=0;   // Vector storing the answer    const ValueType* res=0;   // Vector storing the answer
1140    size_t resoffset=0;       // where in the vector to find the answer    size_t resoffset=0;       // where in the vector to find the answer
1141    #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)    #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)
1142    for (sample=0;sample<totalsamples;++sample)    for (sample=0;sample<totalsamples;++sample)
1143    {    {
1144  cout << "################################# " << sample << endl;  cout << "################################# " << sample << endl;
# Line 818  DataLazy::intoString(ostringstream& oss) Line 1201  DataLazy::intoString(ostringstream& oss)
1201      oss << ')';      oss << ')';
1202      break;      break;
1203    case G_UNARY:    case G_UNARY:
1204      case G_NP1OUT:
1205      case G_NP1OUT_P:
1206      oss << opToString(m_op) << '(';      oss << opToString(m_op) << '(';
1207      m_left->intoString(oss);      m_left->intoString(oss);
1208      oss << ')';      oss << ')';
1209      break;      break;
1210      case G_TENSORPROD:
1211        oss << opToString(m_op) << '(';
1212        m_left->intoString(oss);
1213        oss << ", ";
1214        m_right->intoString(oss);
1215        oss << ')';
1216        break;
1217    default:    default:
1218      oss << "UNKNOWN";      oss << "UNKNOWN";
1219    }    }
1220  }  }
1221    
 // Note that in this case, deepCopy does not make copies of the leaves.  
 // Hopefully copy on write (or whatever we end up using) will take care of this.  
1222  DataAbstract*  DataAbstract*
1223  DataLazy::deepCopy()  DataLazy::deepCopy()
1224  {  {
1225    if (m_op==IDENTITY)    switch (getOpgroup(m_op))
1226    {    {
1227      return new DataLazy(m_left);    // we don't need to copy the child here    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());
1228      case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
1229      case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
1230      case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
1231      case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
1232      default:
1233        throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
1234    }    }
   return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);  
1235  }  }
1236    
1237    
1238    // There is no single, natural interpretation of getLength on DataLazy.
1239    // Instances of DataReady can look at the size of their vectors.
1240    // For lazy though, it could be the size the data would be if it were resolved;
1241    // or it could be some function of the lengths of the DataReady instances which
1242    // form part of the expression.
1243    // Rather than have people making assumptions, I have disabled the method.
1244  DataTypes::ValueType::size_type  DataTypes::ValueType::size_type
1245  DataLazy::getLength() const  DataLazy::getLength() const
1246  {  {
1247    return m_length;    throw DataException("getLength() does not make sense for lazy data.");
1248  }  }
1249    
1250    
# Line 853  DataLazy::getSlice(const DataTypes::Regi Line 1254  DataLazy::getSlice(const DataTypes::Regi
1254    throw DataException("getSlice - not implemented for Lazy objects.");    throw DataException("getSlice - not implemented for Lazy objects.");
1255  }  }
1256    
1257    
1258    // To do this we need to rely on our child nodes
1259    DataTypes::ValueType::size_type
1260    DataLazy::getPointOffset(int sampleNo,
1261                     int dataPointNo)
1262    {
1263      if (m_op==IDENTITY)
1264      {
1265        return m_id->getPointOffset(sampleNo,dataPointNo);
1266      }
1267      if (m_readytype!='E')
1268      {
1269        collapse();
1270        return m_id->getPointOffset(sampleNo,dataPointNo);
1271      }
1272      // at this point we do not have an identity node and the expression will be Expanded
1273      // so we only need to know which child to ask
1274      if (m_left->m_readytype=='E')
1275      {
1276        return m_left->getPointOffset(sampleNo,dataPointNo);
1277      }
1278      else
1279      {
1280        return m_right->getPointOffset(sampleNo,dataPointNo);
1281      }
1282    }
1283    
1284    // To do this we need to rely on our child nodes
1285  DataTypes::ValueType::size_type  DataTypes::ValueType::size_type
1286  DataLazy::getPointOffset(int sampleNo,  DataLazy::getPointOffset(int sampleNo,
1287                   int dataPointNo) const                   int dataPointNo) const
1288  {  {
1289    throw DataException("getPointOffset - not implemented for Lazy objects - yet.");    if (m_op==IDENTITY)
1290      {
1291        return m_id->getPointOffset(sampleNo,dataPointNo);
1292      }
1293      if (m_readytype=='E')
1294      {
1295        // at this point we do not have an identity node and the expression will be Expanded
1296        // so we only need to know which child to ask
1297        if (m_left->m_readytype=='E')
1298        {
1299        return m_left->getPointOffset(sampleNo,dataPointNo);
1300        }
1301        else
1302        {
1303        return m_right->getPointOffset(sampleNo,dataPointNo);
1304        }
1305      }
1306      if (m_readytype=='C')
1307      {
1308        return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
1309      }
1310      throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
1311  }  }
1312    
1313  // It would seem that DataTagged will need to be treated differently since even after setting all tags  // It would seem that DataTagged will need to be treated differently since even after setting all tags
