/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp
ViewVC logotype

Diff of /branches/clazy/escriptcore/src/DataLazy.cpp

Parent Directory Parent Directory | Revision Log Revision Log | View Patch Patch

branches/schroedinger_upto1946/escript/src/DataLazy.cpp revision 1993 by phornby, Fri Nov 7 04:52:15 2008 UTC trunk/escript/src/DataLazy.cpp revision 2166 by jfenwick, Tue Dec 16 06:08:02 2008 UTC
# Line 28  Line 28 
28  #include "UnaryFuncs.h"     // for escript::fsign  #include "UnaryFuncs.h"     // for escript::fsign
29  #include "Utils.h"  #include "Utils.h"
30    
31    // #define LAZYDEBUG(X) if (privdebug){X;}
32    #define LAZYDEBUG(X)
33    namespace
34    {
35    bool privdebug=false;
36    
37    #define ENABLEDEBUG privdebug=true;
38    #define DISABLEDEBUG privdebug=false;
39    }
40    
41  /*  /*
42  How does DataLazy work?  How does DataLazy work?
43  ~~~~~~~~~~~~~~~~~~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~
# Line 48  I will refer to individual DataLazy obje Line 58  I will refer to individual DataLazy obje
58  Each node also stores:  Each node also stores:
59  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was  - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
60      evaluated.      evaluated.
 - m_length ~ how many values would be stored in the answer if the expression was evaluated.  
61  - m_buffsrequired ~ the larged number of samples which would need to be kept simultaneously in order to  - m_buffsrequired ~ the larged number of samples which would need to be kept simultaneously in order to
62      evaluate the expression.      evaluate the expression.
63  - m_samplesize ~ the number of doubles stored in a sample.  - m_samplesize ~ the number of doubles stored in a sample.
# Line 70  The convention that I use, is that the r Line 79  The convention that I use, is that the r
79    
80  For expressions which evaluate to Constant or Tagged, there is a different evaluation method.  For expressions which evaluate to Constant or Tagged, there is a different evaluation method.
81  The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.  The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.
82    
83    To add a new operator you need to do the following (plus anything I might have forgotten - adding a new group for example):
84    1) Add to the ES_optype.
85    2) determine what opgroup your operation belongs to (X)
86    3) add a string for the op to the end of ES_opstrings
87    4) increase ES_opcount
88    5) add an entry (X) to opgroups
89    6) add an entry to the switch in collapseToReady
90    7) add an entry to resolveX
91  */  */
92    
93    
# Line 87  enum ES_opgroup Line 105  enum ES_opgroup
105     G_UNKNOWN,     G_UNKNOWN,
106     G_IDENTITY,     G_IDENTITY,
107     G_BINARY,        // pointwise operations with two arguments     G_BINARY,        // pointwise operations with two arguments
108     G_UNARY      // pointwise operations with one argument     G_UNARY,     // pointwise operations with one argument
109       G_UNARY_P,       // pointwise operations with one argument, requiring a parameter
110       G_NP1OUT,        // non-pointwise op with one output
111       G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter
112       G_TENSORPROD     // general tensor product
113  };  };
114    
115    
# Line 98  string ES_opstrings[]={"UNKNOWN","IDENTI Line 120  string ES_opstrings[]={"UNKNOWN","IDENTI
120              "asin","acos","atan","sinh","cosh","tanh","erf",              "asin","acos","atan","sinh","cosh","tanh","erf",
121              "asinh","acosh","atanh",              "asinh","acosh","atanh",
122              "log10","log","sign","abs","neg","pos","exp","sqrt",              "log10","log","sign","abs","neg","pos","exp","sqrt",
123              "1/","where>0","where<0","where>=0","where<=0"};              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",
124  int ES_opcount=33;              "symmetric","nonsymmetric",
125                "prod",
126                "transpose", "trace"};
127    int ES_opcount=40;
128  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
129              G_UNARY,G_UNARY,G_UNARY, //10              G_UNARY,G_UNARY,G_UNARY, //10
130              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17
131              G_UNARY,G_UNARY,G_UNARY,                    // 20              G_UNARY,G_UNARY,G_UNARY,                    // 20
132              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 28
133              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY};              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35
134                G_NP1OUT,G_NP1OUT,
135                G_TENSORPROD,
136                G_NP1OUT_P, G_NP1OUT_P};
137  inline  inline
138  ES_opgroup  ES_opgroup
139  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
# Line 140  resultFS(DataAbstract_ptr left, DataAbst Line 168  resultFS(DataAbstract_ptr left, DataAbst
168  }  }
169    
170  // return the shape of the result of "left op right"  // return the shape of the result of "left op right"
171    // the shapes resulting from tensor product are more complex to compute so are worked out elsewhere
172  DataTypes::ShapeType  DataTypes::ShapeType
173  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
174  {  {
175      if (left->getShape()!=right->getShape())      if (left->getShape()!=right->getShape())
176      {      {
177        if (getOpgroup(op)!=G_BINARY)        if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
178        {        {
179          throw DataException("Shapes not the name - shapes must match for (point)binary operations.");          throw DataException("Shapes not the name - shapes must match for (point)binary operations.");
180        }        }
# Line 162  resultShape(DataAbstract_ptr left, DataA Line 191  resultShape(DataAbstract_ptr left, DataA
191      return left->getShape();      return left->getShape();
192  }  }
193    
194  // determine the number of points in the result of "left op right"  // return the shape for "op left"
195  size_t  
196  resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataTypes::ShapeType
197    resultShape(DataAbstract_ptr left, ES_optype op)
198  {  {
199     switch (getOpgroup(op))      switch(op)
200     {      {
201     case G_BINARY: return left->getLength();          case TRANS:
202     case G_UNARY: return left->getLength();          return left->getShape();
203     default:      break;
204      throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");      case TRACE:
205     }          return DataTypes::scalarShape;
206        break;
207            default:
208        throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
209        }
210    }
211    
212    // determine the output shape for the general tensor product operation
213    // the additional parameters return information required later for the product
214    // the majority of this code is copy pasted from C_General_Tensor_Product
215    DataTypes::ShapeType
216    GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
217    {
218        
219      // Get rank and shape of inputs
220      int rank0 = left->getRank();
221      int rank1 = right->getRank();
222      const DataTypes::ShapeType& shape0 = left->getShape();
223      const DataTypes::ShapeType& shape1 = right->getShape();
224    
225      // Prepare for the loops of the product and verify compatibility of shapes
226      int start0=0, start1=0;
227      if (transpose == 0)       {}
228      else if (transpose == 1)  { start0 = axis_offset; }
229      else if (transpose == 2)  { start1 = rank1-axis_offset; }
230      else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
231    
232      if (rank0<axis_offset)
233      {
234        throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
235      }
236    
237      // Adjust the shapes for transpose
238      DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather
239      DataTypes::ShapeType tmpShape1(rank1);    // than using push_back
240      for (int i=0; i<rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
241      for (int i=0; i<rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }
242    
243      // Prepare for the loops of the product
244      SL=1, SM=1, SR=1;
245      for (int i=0; i<rank0-axis_offset; i++)   {
246        SL *= tmpShape0[i];
247      }
248      for (int i=rank0-axis_offset; i<rank0; i++)   {
249        if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
250          throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
251        }
252        SM *= tmpShape0[i];
253      }
254      for (int i=axis_offset; i<rank1; i++)     {
255        SR *= tmpShape1[i];
256      }
257    
258      // Define the shape of the output (rank of shape is the sum of the loop ranges below)
259      DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);  
260      {         // block to limit the scope of out_index
261         int out_index=0;
262         for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
263         for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
264      }
265    
266      if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
267      {
268         ostringstream os;
269         os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
270         throw DataException(os.str());
271      }
272    
273      return shape2;
274  }  }
275    
276    
277    // determine the number of points in the result of "left op right"
278    // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here
279    // size_t
280    // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
281    // {
282    //    switch (getOpgroup(op))
283    //    {
284    //    case G_BINARY: return left->getLength();
285    //    case G_UNARY: return left->getLength();
286    //    case G_NP1OUT: return left->getLength();
287    //    default:
288    //  throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");
289    //    }
290    // }
291    
292  // determine the number of samples requires to evaluate an expression combining left and right  // determine the number of samples requires to evaluate an expression combining left and right
293    // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.
294    // The same goes for G_TENSORPROD
295    // It might seem that pointwise binary ops (G_BINARY) could be written over the top of the lefths.
296    // This would be true were it not for the possibility that the LHS could be a scalar which needs to be examined
297    // multiple times
298  int  int
299  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
300  {  {
301     switch(getOpgroup(op))     switch(getOpgroup(op))
302     {     {
303     case G_IDENTITY: return 1;     case G_IDENTITY: return 1;
304     case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);     case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
305     case G_UNARY: return max(left->getBuffsRequired(),1);     case G_UNARY:
306       case G_UNARY_P: return max(left->getBuffsRequired(),1);
307       case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);
308       case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);
309       case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
310     default:     default:
311      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
312     }     }
# Line 208  opToString(ES_optype op) Line 331  opToString(ES_optype op)
331    
332  DataLazy::DataLazy(DataAbstract_ptr p)  DataLazy::DataLazy(DataAbstract_ptr p)
333      : parent(p->getFunctionSpace(),p->getShape()),      : parent(p->getFunctionSpace(),p->getShape()),
334      m_op(IDENTITY)      m_op(IDENTITY),
335        m_axis_offset(0),
336        m_transpose(0),
337        m_SL(0), m_SM(0), m_SR(0)
338  {  {
339     if (p->isLazy())     if (p->isLazy())
340     {     {
# Line 225  DataLazy::DataLazy(DataAbstract_ptr p) Line 351  DataLazy::DataLazy(DataAbstract_ptr p)
351      else if (p->isTagged()) {m_readytype='T';}      else if (p->isTagged()) {m_readytype='T';}
352      else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}      else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}
353     }     }
    m_length=p->getLength();  
354     m_buffsRequired=1;     m_buffsRequired=1;
355     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
356  cout << "(1)Lazy created with " << m_samplesize << endl;     m_maxsamplesize=m_samplesize;
357    LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
358  }  }
359    
360    
# Line 236  cout << "(1)Lazy created with " << m_sam Line 362  cout << "(1)Lazy created with " << m_sam
362    
363  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
364      : parent(left->getFunctionSpace(),left->getShape()),      : parent(left->getFunctionSpace(),left->getShape()),
365      m_op(op)      m_op(op),
366        m_axis_offset(0),
367        m_transpose(0),
368        m_SL(0), m_SM(0), m_SR(0)
369  {  {
370     if (getOpgroup(op)!=G_UNARY)     if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))
371     {     {
372      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
373     }     }
374    
375     DataLazy_ptr lleft;     DataLazy_ptr lleft;
376     if (!left->isLazy())     if (!left->isLazy())
377     {     {
# Line 252  DataLazy::DataLazy(DataAbstract_ptr left Line 382  DataLazy::DataLazy(DataAbstract_ptr left
382      lleft=dynamic_pointer_cast<DataLazy>(left);      lleft=dynamic_pointer_cast<DataLazy>(left);
383     }     }
384     m_readytype=lleft->m_readytype;     m_readytype=lleft->m_readytype;
    m_length=left->getLength();  
385     m_left=lleft;     m_left=lleft;
386     m_buffsRequired=1;     m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
387     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
388       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
389  }  }
390    
391    
392  // In this constructor we need to consider interpolation  // In this constructor we need to consider interpolation
393  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
394      : parent(resultFS(left,right,op), resultShape(left,right,op)),      : parent(resultFS(left,right,op), resultShape(left,right,op)),
395      m_op(op)      m_op(op),
396        m_SL(0), m_SM(0), m_SR(0)
397  {  {
398     if (getOpgroup(op)!=G_BINARY)     if ((getOpgroup(op)!=G_BINARY))
399     {     {
400      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");      throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
401     }     }
# Line 276  DataLazy::DataLazy(DataAbstract_ptr left Line 407  DataLazy::DataLazy(DataAbstract_ptr left
407      Data tmp(ltemp,fs);      Data tmp(ltemp,fs);
408      left=tmp.borrowDataPtr();      left=tmp.borrowDataPtr();
409     }     }
410     if (getFunctionSpace()!=right->getFunctionSpace())   // left needs to be interpolated     if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
411     {     {
412      Data tmp(Data(right),getFunctionSpace());      Data tmp(Data(right),getFunctionSpace());
413      right=tmp.borrowDataPtr();      right=tmp.borrowDataPtr();
# Line 313  DataLazy::DataLazy(DataAbstract_ptr left Line 444  DataLazy::DataLazy(DataAbstract_ptr left
444     {     {
445      m_readytype='C';      m_readytype='C';
446     }     }
447     m_length=resultLength(m_left,m_right,m_op);     m_samplesize=getNumDPPSample()*getNoValues();
448     m_samplesize=getNumDPPSample()*getNoValues();         m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());  
449     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_buffsRequired=calcBuffs(m_left, m_right,m_op);
450  cout << "(3)Lazy created with " << m_samplesize << endl;  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
451  }  }
452    
453    DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
454        : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
455        m_op(op),
456        m_axis_offset(axis_offset),
457        m_transpose(transpose)
458    {
459       if ((getOpgroup(op)!=G_TENSORPROD))
460       {
461        throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
462       }
463       if ((transpose>2) || (transpose<0))
464       {
465        throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
466       }
467       if (getFunctionSpace()!=left->getFunctionSpace())    // left needs to be interpolated
468       {
469        FunctionSpace fs=getFunctionSpace();
470        Data ltemp(left);
471        Data tmp(ltemp,fs);
472        left=tmp.borrowDataPtr();
473       }
474       if (getFunctionSpace()!=right->getFunctionSpace())   // right needs to be interpolated
475       {
476        Data tmp(Data(right),getFunctionSpace());
477        right=tmp.borrowDataPtr();
478       }
479       left->operandCheck(*right);
480    
481       if (left->isLazy())          // the children need to be DataLazy. Wrap them in IDENTITY if required
482       {
483        m_left=dynamic_pointer_cast<DataLazy>(left);
484       }
485       else
486       {
487        m_left=DataLazy_ptr(new DataLazy(left));
488       }
489       if (right->isLazy())
490       {
491        m_right=dynamic_pointer_cast<DataLazy>(right);
492       }
493       else
494       {
495        m_right=DataLazy_ptr(new DataLazy(right));
496       }
497       char lt=m_left->m_readytype;
498       char rt=m_right->m_readytype;
499       if (lt=='E' || rt=='E')
500       {
501        m_readytype='E';
502       }
503       else if (lt=='T' || rt=='T')
504       {
505        m_readytype='T';
506       }
507       else
508       {
509        m_readytype='C';
510       }
511       m_samplesize=getNumDPPSample()*getNoValues();
512       m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());  
513       m_buffsRequired=calcBuffs(m_left, m_right,m_op);
514    LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
515    }
516    
517    
518    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
519        : parent(left->getFunctionSpace(), resultShape(left,op)),
520        m_op(op),
521        m_axis_offset(axis_offset),
522        m_transpose(0),
523        m_tol(0)
524    {
525       if ((getOpgroup(op)!=G_NP1OUT_P))
526       {
527        throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
528       }
529       DataLazy_ptr lleft;
530       if (!left->isLazy())
531       {
532        lleft=DataLazy_ptr(new DataLazy(left));
533       }
534       else
535       {
536        lleft=dynamic_pointer_cast<DataLazy>(left);
537       }
538       m_readytype=lleft->m_readytype;
539       m_left=lleft;
540       m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
541       m_samplesize=getNumDPPSample()*getNoValues();
542       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
543    LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
544    }
545    
546    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)
547        : parent(left->getFunctionSpace(), left->getShape()),
548        m_op(op),
549        m_axis_offset(0),
550        m_transpose(0),
551        m_tol(tol)
552    {
553       if ((getOpgroup(op)!=G_UNARY_P))
554       {
555        throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");
556       }
557       DataLazy_ptr lleft;
558       if (!left->isLazy())
559       {
560        lleft=DataLazy_ptr(new DataLazy(left));
561       }
562       else
563       {
564        lleft=dynamic_pointer_cast<DataLazy>(left);
565       }
566       m_readytype=lleft->m_readytype;
567       m_left=lleft;
568       m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
569       m_samplesize=getNumDPPSample()*getNoValues();
570       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
571    LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
572    }
573    
574  DataLazy::~DataLazy()  DataLazy::~DataLazy()
575  {  {
# Line 332  DataLazy::getBuffsRequired() const Line 583  DataLazy::getBuffsRequired() const
583  }  }
584    
585    
586    size_t
587    DataLazy::getMaxSampleSize() const
588    {
589        return m_maxsamplesize;
590    }
591    
592  /*  /*
593    \brief Evaluates the expression using methods on Data.    \brief Evaluates the expression using methods on Data.
594    This does the work for the collapse method.    This does the work for the collapse method.
# Line 351  DataLazy::collapseToReady() Line 608  DataLazy::collapseToReady()
608    DataReady_ptr pleft=m_left->collapseToReady();    DataReady_ptr pleft=m_left->collapseToReady();
609    Data left(pleft);    Data left(pleft);
610    Data right;    Data right;
611    if (getOpgroup(m_op)==G_BINARY)    if ((getOpgroup(m_op)==G_BINARY) || (getOpgroup(m_op)==G_TENSORPROD))
612    {    {
613      right=Data(m_right->collapseToReady());      right=Data(m_right->collapseToReady());
614    }    }
# Line 450  DataLazy::collapseToReady() Line 707  DataLazy::collapseToReady()
707      case LEZ:      case LEZ:
708      result=left.whereNonPositive();      result=left.whereNonPositive();
709      break;      break;
710        case NEZ:
711        result=left.whereNonZero(m_tol);
712        break;
713        case EZ:
714        result=left.whereZero(m_tol);
715        break;
716        case SYM:
717        result=left.symmetric();
718        break;
719        case NSYM:
720        result=left.nonsymmetric();
721        break;
722        case PROD:
723        result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
724        break;
725        case TRANS:
726        result=left.transpose(m_axis_offset);
727        break;
728        case TRACE:
729        result=left.trace(m_axis_offset);
730        break;
731      default:      default:
732      throw DataException("Programmer error - do not know how to resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
733    }    }
734    return result.borrowReadyPtr();    return result.borrowReadyPtr();
735  }  }
# Line 478  DataLazy::collapse() Line 756  DataLazy::collapse()
756  }  }
757    
758  /*  /*
759    \brief Compute the value of the expression (binary operation) for the given sample.    \brief Compute the value of the expression (unary operation) for the given sample.
760    \return Vector which stores the value of the subexpression for the given sample.    \return Vector which stores the value of the subexpression for the given sample.
761    \param v A vector to store intermediate results.    \param v A vector to store intermediate results.
762    \param offset Index in v to begin storing results.    \param offset Index in v to begin storing results.
# Line 533  DataLazy::resolveUnary(ValueType& v, siz Line 811  DataLazy::resolveUnary(ValueType& v, siz
811      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);      tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);
812      break;      break;
813      case ERF:      case ERF:
814  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
815      throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");      throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
816  #else  #else
817      tensor_unary_operation(m_samplesize, left, result, ::erf);      tensor_unary_operation(m_samplesize, left, result, ::erf);
818      break;      break;
819  #endif  #endif
820     case ASINH:     case ASINH:
821  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
822      tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);      tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
823  #else  #else
824      tensor_unary_operation(m_samplesize, left, result, ::asinh);      tensor_unary_operation(m_samplesize, left, result, ::asinh);
825  #endif    #endif  
826      break;      break;
827     case ACOSH:     case ACOSH:
828  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
829      tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);      tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
830  #else  #else
831      tensor_unary_operation(m_samplesize, left, result, ::acosh);      tensor_unary_operation(m_samplesize, left, result, ::acosh);
832  #endif    #endif  
833      break;      break;
834     case ATANH:     case ATANH:
835  #ifdef _WIN32  #if defined (_WIN32) && !defined(__INTEL_COMPILER)
836      tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);      tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
837  #else  #else
838      tensor_unary_operation(m_samplesize, left, result, ::atanh);      tensor_unary_operation(m_samplesize, left, result, ::atanh);
# Line 601  DataLazy::resolveUnary(ValueType& v, siz Line 879  DataLazy::resolveUnary(ValueType& v, siz
879      case LEZ:      case LEZ:
880      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
881      break;      break;
882    // There are actually G_UNARY_P but I don't see a compelling reason to treat them differently
883        case NEZ:
884        tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));
885        break;
886        case EZ:
887        tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));
888        break;
889    
890      default:      default:
891      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
# Line 612  DataLazy::resolveUnary(ValueType& v, siz Line 897  DataLazy::resolveUnary(ValueType& v, siz
897    
898    
899    
900    
901    /*
902      \brief Compute the value of the expression (unary operation) for the given sample.
903      \return Vector which stores the value of the subexpression for the given sample.
904      \param v A vector to store intermediate results.
905      \param offset Index in v to begin storing results.
906      \param sampleNo Sample number to evaluate.
907      \param roffset (output parameter) the offset in the return vector where the result begins.
908    
909      The return value will be an existing vector so do not deallocate it.
910      If the result is stored in v it should be stored at the offset given.
911      Everything from offset to the end of v should be considered available for this method to use.
912    */
913    DataTypes::ValueType*
914    DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
915    {
916        // we assume that any collapsing has been done before we get here
917        // since we only have one argument we don't need to think about only
918        // processing single points.
919      if (m_readytype!='E')
920      {
921        throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");
922      }
923        // since we can't write the result over the input, we need a result offset further along
924      size_t subroffset=roffset+m_samplesize;
925    LAZYDEBUG(cerr << "subroffset=" << subroffset << endl;)
926      const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);
927      roffset=offset;
928      size_t loop=0;
929      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
930      size_t step=getNoValues();
931      switch (m_op)
932      {
933        case SYM:
934        for (loop=0;loop<numsteps;++loop)
935        {
936            DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
937            subroffset+=step;
938            offset+=step;
939        }
940        break;
941        case NSYM:
942        for (loop=0;loop<numsteps;++loop)
943        {
944            DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
945            subroffset+=step;
946            offset+=step;
947        }
948        break;
949        default:
950        throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
951      }
952      return &v;
953    }
954    
955    /*
956      \brief Compute the value of the expression (unary operation) for the given sample.
957      \return Vector which stores the value of the subexpression for the given sample.
958      \param v A vector to store intermediate results.
959      \param offset Index in v to begin storing results.
960      \param sampleNo Sample number to evaluate.
961      \param roffset (output parameter) the offset in the return vector where the result begins.
962    
963      The return value will be an existing vector so do not deallocate it.
964      If the result is stored in v it should be stored at the offset given.
965      Everything from offset to the end of v should be considered available for this method to use.
966    */
967    DataTypes::ValueType*
// Evaluate a parameterised unary operation (TRACE or TRANS, both of which use
// m_axis_offset) for one sample of an expanded expression.
//   v        - scratch/result vector; the answer is written starting at 'offset'
//   roffset  - (out) set to 'offset', i.e. where the result begins in v
// Returns &v.  Throws DataException if this node is not expanded ('E') or the
// operator is not one this helper handles.
968    DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
969    {
970        // we assume that any collapsing has been done before we get here
971        // since we only have one argument we don't need to think about only
972        // processing single points.
973      if (m_readytype!='E')
974      {
975        throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");
976      }
977        // since we can't write the result over the input, we need a result offset further along
      // The child is asked to place its values beyond this node's own sample
      // slot; subroffset receives the position where those values actually begin.
978      size_t subroffset;
979      const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);
980    LAZYDEBUG(cerr << "srcsamplesize=" << offset+m_left->m_samplesize << " beg=" << subroffset << endl;)
981    LAZYDEBUG(cerr << "Offset for 5800=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << endl;)
      // The result starts exactly where the caller asked us to write it.
982      roffset=offset;
983      size_t loop=0;
      // One step per datapoint in the sample.  (The '?:' is defensive; the
      // non-'E' case was rejected above, so numsteps==getNumDPPSample() here.)
984      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
      // Per-datapoint value counts differ between input and output because
      // trace and transpose change the shape.
985      size_t outstep=getNoValues();
986      size_t instep=m_left->getNoValues();
987    LAZYDEBUG(cerr << "instep=" << instep << " outstep=" << outstep<< " numsteps=" << numsteps << endl;)
988      switch (m_op)
989      {
990        case TRACE:
991        for (loop=0;loop<numsteps;++loop)
992        {
      // Debug probe hard-wired to global datapoint 5800; it emits nothing
      // unless LAZYDEBUG is enabled.
993    size_t zz=sampleNo*getNumDPPSample()+loop;
994    if (zz==5800)
995    {
996    LAZYDEBUG(cerr << "point=" <<  zz<< endl;)
997    LAZYDEBUG(cerr << "Input to  trace=" << DataTypes::pointToString(*vleft,m_left->getShape(),subroffset,"") << endl;)
998    LAZYDEBUG(cerr << "Offset for point=" << getPointOffset(5800/getNumDPPSample(),5800%getNumDPPSample()) << " vs ";)
999    LAZYDEBUG(cerr << subroffset << endl;)
1000    LAZYDEBUG(cerr << "output=" << offset << endl;)
1001    }
1002                DataMaths::trace(*vleft,m_left->getShape(),subroffset, v ,getShape(),offset,m_axis_offset);
1003    if (zz==5800)
1004    {
1005    LAZYDEBUG(cerr << "Result of trace=" << DataTypes::pointToString(v,getShape(),offset,"") << endl;)
1006    }
      // Advance the input and output cursors to the next datapoint.
1007            subroffset+=instep;
1008            offset+=outstep;
1009        }
1010        break;
1011        case TRANS:
1012        for (loop=0;loop<numsteps;++loop)
1013        {
1014                DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
1015            subroffset+=instep;
1016            offset+=outstep;
1017        }
1018        break;
1019        default:
1020        throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
1021      }
1022      return &v;
1023    }
1024    
1025    
1026  #define PROC_OP(TYPE,X)                               \  #define PROC_OP(TYPE,X)                               \
1027      for (int i=0;i<steps;++i,resultp+=resultStep) \      for (int j=0;j<onumsteps;++j)\
1028      { \      {\
1029         tensor_binary_operation##TYPE(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \        for (int i=0;i<numsteps;++i,resultp+=resultStep) \
1030         lroffset+=leftStep; \        { \
1031         rroffset+=rightStep; \  LAZYDEBUG(cout << "[left,right]=[" << lroffset << "," << rroffset << "]" << endl;)\
1032    LAZYDEBUG(cout << "{left,right}={" << (*left)[lroffset] << "," << (*right)[rroffset] << "}\n";)\
1033             tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
1034    LAZYDEBUG(cout << " result=      " << resultp[0] << endl;) \
1035             lroffset+=leftstep; \
1036             rroffset+=rightstep; \
1037          }\
1038          lroffset+=oleftstep;\
1039          rroffset+=orightstep;\
1040      }      }
1041    
1042  /*  /*
# Line 644  DataLazy::resolveUnary(ValueType& v, siz Line 1063  DataLazy::resolveUnary(ValueType& v, siz
1063  DataTypes::ValueType*  DataTypes::ValueType*
1064  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const
1065  {  {
1066  cout << "Resolve binary: " << toString() << endl;  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1067    
1068    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors
1069      // first work out which of the children are expanded      // first work out which of the children are expanded
1070    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1071    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1072    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?    if (!leftExp && !rightExp)
1073    int steps=(bigloops?1:getNumDPPSample());    {
1074    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint      throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
1075    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops    }
1076    {    bool leftScalar=(m_left->getRank()==0);
1077      EsysAssert((m_left->getRank()==0) || (m_right->getRank()==0), "Error - Ranks must match unless one is 0.");    bool rightScalar=(m_right->getRank()==0);
1078      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1079      chunksize=1;    // for scalar    {
1080    }          throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1081    int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);    }
1082    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);    size_t leftsize=m_left->getNoValues();
1083    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    size_t rightsize=m_right->getNoValues();
1084      size_t chunksize=1;           // how many doubles will be processed in one go
1085      int leftstep=0;       // how far should the left offset advance after each step
1086      int rightstep=0;
1087      int numsteps=0;       // total number of steps for the inner loop
1088      int oleftstep=0;  // the o variables refer to the outer loop
1089      int orightstep=0; // The outer loop is only required in cases where there is an extended scalar
1090      int onumsteps=1;
1091      
1092      bool LES=(leftExp && leftScalar); // Left is an expanded scalar
1093      bool RES=(rightExp && rightScalar);
1094      bool LS=(!leftExp && leftScalar); // left is a single scalar
1095      bool RS=(!rightExp && rightScalar);
1096      bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1097      bool RN=(!rightExp && !rightScalar);
1098      bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1099      bool REN=(rightExp && !rightScalar);
1100    
1101      if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars
1102      {
1103        chunksize=m_left->getNumDPPSample()*leftsize;
1104        leftstep=0;
1105        rightstep=0;
1106        numsteps=1;
1107      }
1108      else if (LES || RES)
1109      {
1110        chunksize=1;
1111        if (LES)        // left is an expanded scalar
1112        {
1113            if (RS)
1114            {
1115               leftstep=1;
1116               rightstep=0;
1117               numsteps=m_left->getNumDPPSample();
1118            }
1119            else        // RN or REN
1120            {
1121               leftstep=0;
1122               oleftstep=1;
1123               rightstep=1;
1124               orightstep=(RN?-rightsize:0);
1125               numsteps=rightsize;
1126               onumsteps=m_left->getNumDPPSample();
1127            }
1128        }
1129        else        // right is an expanded scalar
1130        {
1131            if (LS)
1132            {
1133               rightstep=1;
1134               leftstep=0;
1135               numsteps=m_right->getNumDPPSample();
1136            }
1137            else
1138            {
1139               rightstep=0;
1140               orightstep=1;
1141               leftstep=1;
1142               oleftstep=(LN?-leftsize:0);
1143               numsteps=leftsize;
1144               onumsteps=m_right->getNumDPPSample();
1145            }
1146        }
1147      }
1148      else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1149      {
1150        if (LEN)    // and Right will be a single value
1151        {
1152            chunksize=rightsize;
1153            leftstep=rightsize;
1154            rightstep=0;
1155            numsteps=m_left->getNumDPPSample();
1156            if (RS)
1157            {
1158               numsteps*=leftsize;
1159            }
1160        }
1161        else    // REN
1162        {
1163            chunksize=leftsize;
1164            rightstep=leftsize;
1165            leftstep=0;
1166            numsteps=m_right->getNumDPPSample();
1167            if (LS)
1168            {
1169               numsteps*=rightsize;
1170            }
1171        }
1172      }
1173    
1174      int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0
1175      // Get the values of sub-expressions      // Get the values of sub-expressions
1176    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    const ValueType* left=m_left->resolveSample(v,offset+getMaxSampleSize(),sampleNo,lroffset);   // see note on
1177    const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note      // calcBufss for why we can't put offset as the 2nd param above
1178      const ValueType* right=m_right->resolveSample(v,offset+2*getMaxSampleSize(),sampleNo,rroffset); // Note
1179      // the right child starts further along.      // the right child starts further along.
1180    LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1181    LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1182    LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
1183    LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)
1184    LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)
1185    LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)
1186    LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)
1187    double* resultp=&(v[offset]);     // results are stored at the vector offset we recieved    double* resultp=&(v[offset]);     // results are stored at the vector offset we recieved
1188    switch(m_op)    switch(m_op)
1189    {    {
1190      case ADD:      case ADD:
1191          PROC_OP(/**/,plus<double>());          PROC_OP(NO_ARG,plus<double>());
1192      break;      break;
1193      case SUB:      case SUB:
1194      PROC_OP(/**/,minus<double>());      PROC_OP(NO_ARG,minus<double>());
1195      break;      break;
1196      case MUL:      case MUL:
1197      PROC_OP(/**/,multiplies<double>());      PROC_OP(NO_ARG,multiplies<double>());
1198      break;      break;
1199      case DIV:      case DIV:
1200      PROC_OP(/**/,divides<double>());      PROC_OP(NO_ARG,divides<double>());
1201      break;      break;
1202      case POW:      case POW:
1203         PROC_OP(<double (double,double)>,::pow);         PROC_OP(double (double,double),::pow);
1204      break;      break;
1205      default:      default:
1206      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
# Line 694  cout << "Resolve binary: " << toString() Line 1212  cout << "Resolve binary: " << toString()
1212    
1213    
1214  /*  /*
1215      \brief Compute the value of the expression (tensor product) for the given sample.
1216      \return Vector which stores the value of the subexpression for the given sample.
1217      \param v A vector to store intermediate results.
1218      \param offset Index in v to begin storing results.
1219      \param sampleNo Sample number to evaluate.
1220      \param roffset (output parameter) the offset in the return vector where the result begins.
1221    
1222      The return value will be an existing vector so do not deallocate it.
1223      If the result is stored in v it should be stored at the offset given.
1224      Everything from offset to the end of v should be considered available for this method to use.
1225    */
1226    // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1227    // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1228    // unlike the other resolve helpers, we must treat these datapoints separately.
1229    DataTypes::ValueType*
// Evaluate a tensor product node (PROD) for one sample.  Both children are
// resolved into scratch space further along v, then matrix_matrix_product is
// applied 'steps' times (once per datapoint); the result is written at
// 'offset' and roffset reports that position.  m_SL, m_SM, m_SR and
// m_transpose are forwarded unchanged to matrix_matrix_product.
// Assumes Constant/Tagged subexpressions were already collapsed, so at least
// one child is expanded.
1230    DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const
1231    {
1232    LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1233    
1234      size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors
1235        // first work out which of the children are expanded
1236      bool leftExp=(m_left->m_readytype=='E');
1237      bool rightExp=(m_right->m_readytype=='E');
1238      int steps=getNumDPPSample();
      // Per-datapoint strides: nonzero only when exactly one child is expanded
      // (the other child's single datapoint is reused for every step).
      // NOTE(review): each stride uses the *sibling's* getNoValues(); confirm
      // this is intentional rather than the operand's own datapoint size.
      // Also, when BOTH children are expanded all three strides are zero here
      // - verify against the intended both-expanded handling.
1239      int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);
1240      int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);
1241      int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0
1242        // Get the values of sub-expressions (leave a gap of one sample for the result).
1243      int gap=offset+m_left->getMaxSampleSize();    // actually only needs to be m_left->m_samplesize
1244      const ValueType* left=m_left->resolveSample(v,gap,sampleNo,lroffset);
      // The right child's scratch region starts after the left child's.
1245      gap+=m_right->getMaxSampleSize();
1246      const ValueType* right=m_right->resolveSample(v,gap,sampleNo,rroffset);
1247    LAZYDEBUG(cout << "Post sub calls: " << toString() << endl;)
1248    LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)
      // NOTE(review): the label below says "rightExp" but it actually prints
      // the right operand's rank.
1249    LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)
1250    LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)
1251    LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)
1252    LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1253      double* resultp=&(v[offset]);     // results are stored at the vector offset we received
1254      switch(m_op)
1255      {
1256        case PROD:
      // One matrix product per datapoint in the sample.
1257        for (int i=0;i<steps;++i,resultp+=resultStep)
1258        {
1259    LAZYDEBUG(cout << "lroffset=" << lroffset << "rroffset=" << rroffset << endl;)
1260    LAZYDEBUG(cout << "l*=" << left << " r*=" << right << endl;)
1261    LAZYDEBUG(cout << "m_SL=" << m_SL << " m_SM=" << m_SM << " m_SR=" << m_SR << endl;)
1262              const double *ptr_0 = &((*left)[lroffset]);
1263              const double *ptr_1 = &((*right)[rroffset]);
1264    LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
1265    LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)
1266              matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1267          lroffset+=leftStep;
1268          rroffset+=rightStep;
1269        }
1270        break;
1271        default:
1272        throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1273      }
      // Tell the caller where the answer begins.
1274      roffset=offset;
1275      return &v;
1276    }
1277    
1278    
1279    
1280    /*
1281    \brief Compute the value of the expression for the given sample.    \brief Compute the value of the expression for the given sample.
1282    \return Vector which stores the value of the subexpression for the given sample.    \return Vector which stores the value of the subexpression for the given sample.
1283    \param v A vector to store intermediate results.    \param v A vector to store intermediate results.
# Line 712  cout << "Resolve binary: " << toString() Line 1296  cout << "Resolve binary: " << toString()
1296  const DataTypes::ValueType*  const DataTypes::ValueType*
1297  DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
1298  {  {
1299  cout << "Resolve sample " << toString() << endl;  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
1300      // collapse so we have a 'E' node or an IDENTITY for some other type      // collapse so we have a 'E' node or an IDENTITY for some other type
1301    if (m_readytype!='E' && m_op!=IDENTITY)    if (m_readytype!='E' && m_op!=IDENTITY)
1302    {    {
# Line 724  cout << "Resolve sample " << toString() Line 1308  cout << "Resolve sample " << toString()
1308      if (m_readytype=='C')      if (m_readytype=='C')
1309      {      {
1310      roffset=0;      roffset=0;
1311    LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)
1312      return &(vec);      return &(vec);
1313      }      }
1314      roffset=m_id->getPointOffset(sampleNo, 0);      roffset=m_id->getPointOffset(sampleNo, 0);
1315    LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)
1316      return &(vec);      return &(vec);
1317    }    }
1318    if (m_readytype!='E')    if (m_readytype!='E')
# Line 735  cout << "Resolve sample " << toString() Line 1321  cout << "Resolve sample " << toString()
1321    }    }
1322    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1323    {    {
1324    case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);    case G_UNARY:
1325      case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);
1326    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
1327      case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);
1328      case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);
1329      case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);
1330    default:    default:
1331      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1332    }    }
1333    
1334  }  }
1335    
1336    
# Line 749  DataReady_ptr Line 1340  DataReady_ptr
1340  DataLazy::resolve()  DataLazy::resolve()
1341  {  {
1342    
1343  cout << "Sample size=" << m_samplesize << endl;  LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)
1344  cout << "Buffers=" << m_buffsRequired << endl;  LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)
1345    
1346    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally
1347    {    {
# Line 761  cout << "Buffers=" << m_buffsRequired << Line 1352  cout << "Buffers=" << m_buffsRequired <<
1352      return m_id;      return m_id;
1353    }    }
1354      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1355    size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired));    // Each thread needs to have enough    size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough
1356      // storage to evaluate its expression      // storage to evaluate its expression
1357    int numthreads=1;    int numthreads=1;
1358  #ifdef _OPENMP  #ifdef _OPENMP
1359    numthreads=getNumberOfThreads();    numthreads=getNumberOfThreads();
   int threadnum=0;  
1360  #endif  #endif
1361    ValueType v(numthreads*threadbuffersize);    ValueType v(numthreads*threadbuffersize);
1362  cout << "Buffer created with size=" << v.size() << endl;  LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)
1363    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
1364    ValueType& resvec=result->getVector();    ValueType& resvec=result->getVector();
1365    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
# Line 778  cout << "Buffer created with size=" << v Line 1368  cout << "Buffer created with size=" << v
1368    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1369    const ValueType* res=0;   // Vector storing the answer    const ValueType* res=0;   // Vector storing the answer
1370    size_t resoffset=0;       // where in the vector to find the answer    size_t resoffset=0;       // where in the vector to find the answer
1371    #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1372      #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)
1373    for (sample=0;sample<totalsamples;++sample)    for (sample=0;sample<totalsamples;++sample)
1374    {    {
1375  cout << "################################# " << sample << endl;        if (sample==0)  {ENABLEDEBUG}
1376    
1377    //       if (sample==5800/getNumDPPSample())  {ENABLEDEBUG}
1378    LAZYDEBUG(cout << "################################# " << sample << endl;)
1379  #ifdef _OPENMP  #ifdef _OPENMP
1380      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
1381  #else  #else
1382      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.
1383  #endif  #endif
1384  cerr << "-------------------------------- " << endl;  LAZYDEBUG(cerr << "-------------------------------- " << endl;)
1385    LAZYDEBUG(cerr<< "Copying sample#" << sample << endl;)
1386      outoffset=result->getPointOffset(sample,0);      outoffset=result->getPointOffset(sample,0);
1387  cerr << "offset=" << outoffset << endl;  LAZYDEBUG(cerr << "offset=" << outoffset << " from offset=" << resoffset << " " << m_samplesize << " doubles" << endl;)
1388      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector
1389      {      {
1390    LAZYDEBUG(cerr << "outoffset=" << outoffset << " resoffset=" << resoffset << " " << (*res)[resoffset]<< endl;)
1391      resvec[outoffset]=(*res)[resoffset];      resvec[outoffset]=(*res)[resoffset];
1392      }      }
1393  cerr << "*********************************" << endl;  LAZYDEBUG(cerr << DataTypes::pointToString(resvec,getShape(),outoffset-m_samplesize+DataTypes::noValues(getShape()),"Final result:") << endl;)
1394    LAZYDEBUG(cerr << "*********************************" << endl;)
1395        DISABLEDEBUG
1396    }    }
1397    return resptr;    return resptr;
1398  }  }
# Line 841  DataLazy::intoString(ostringstream& oss) Line 1439  DataLazy::intoString(ostringstream& oss)
1439      oss << ')';      oss << ')';
1440      break;      break;
1441    case G_UNARY:    case G_UNARY:
1442      case G_UNARY_P:
1443      case G_NP1OUT:
1444      case G_NP1OUT_P:
1445      oss << opToString(m_op) << '(';      oss << opToString(m_op) << '(';
1446      m_left->intoString(oss);      m_left->intoString(oss);
1447      oss << ')';      oss << ')';
1448      break;      break;
1449      case G_TENSORPROD:
1450        oss << opToString(m_op) << '(';
1451        m_left->intoString(oss);
1452        oss << ", ";
1453        m_right->intoString(oss);
1454        oss << ')';
1455        break;
1456    default:    default:
1457      oss << "UNKNOWN";      oss << "UNKNOWN";
1458    }    }
# Line 858  DataLazy::deepCopy() Line 1466  DataLazy::deepCopy()
1466    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());    case G_IDENTITY:  return new DataLazy(m_id->deepCopy()->getPtr());
1467    case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);    case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
1468    case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);    case G_BINARY:    return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
1469      case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
1470      case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
1471    default:    default:
1472      throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");      throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
1473    }    }
1474  }  }
1475    
1476    
1477    // There is no single, natural interpretation of getLength on DataLazy.
1478    // Instances of DataReady can look at the size of their vectors.
1479    // For lazy though, it could be the size the data would be if it were resolved;
1480    // or it could be some function of the lengths of the DataReady instances which
1481    // form part of the expression.
1482    // Rather than have people making assumptions, I have disabled the method.
1483  DataTypes::ValueType::size_type  DataTypes::ValueType::size_type
1484  DataLazy::getLength() const  DataLazy::getLength() const
1485  {  {
1486    return m_length;    throw DataException("getLength() does not make sense for lazy data.");
1487  }  }
1488    
1489    

Legend:
Removed from v.1993  
changed lines
  Added in v.2166

  ViewVC Help
Powered by ViewVC 1.1.26