
Diff of /branches/clazy/escriptcore/src/DataLazy.cpp


revision 2066 by jfenwick, Thu Nov 20 05:31:33 2008 UTC → revision 2157 by jfenwick, Mon Dec 15 06:05:58 2008 UTC
# Line 28  Line 28 
28  #include "UnaryFuncs.h"     // for escript::fsign  #include "UnaryFuncs.h"     // for escript::fsign
29  #include "Utils.h"  #include "Utils.h"
30    
31    // #define LAZYDEBUG(X) if (privdebug){X;}
32    #define LAZYDEBUG(X)
33    namespace
34    {
35    bool privdebug=false;
36    
37    #define ENABLEDEBUG privdebug=true;
38    #define DISABLEDEBUG privdebug=false;
39    }
40    
41  /*  /*
42  How does DataLazy work?  How does DataLazy work?
43  ~~~~~~~~~~~~~~~~~~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~
# Line 70  The convention that I use, is that the r Line 80  The convention that I use, is that the r
80  For expressions which evaluate to Constant or Tagged, there is a different evaluation method.  For expressions which evaluate to Constant or Tagged, there is a different evaluation method.
81  The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.  The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.
82    
83  To add a new operator you need to do the following (plus anything I might have forgotten):  To add a new operator you need to do the following (plus anything I might have forgotten - adding a new group for example):
84  1) Add to the ES_optype.  1) Add to the ES_optype.
85  2) determine what opgroup your operation belongs to (X)  2) determine what opgroup your operation belongs to (X)
86  3) add a string for the op to the end of ES_opstrings  3) add a string for the op to the end of ES_opstrings
# Line 96  enum ES_opgroup Line 106  enum ES_opgroup
106     G_IDENTITY,     G_IDENTITY,
107     G_BINARY,        // pointwise operations with two arguments     G_BINARY,        // pointwise operations with two arguments
108     G_UNARY,     // pointwise operations with one argument     G_UNARY,     // pointwise operations with one argument
109       G_UNARY_P,       // pointwise operations with one argument, requiring a parameter
110     G_NP1OUT,        // non-pointwise op with one output     G_NP1OUT,        // non-pointwise op with one output
111       G_NP1OUT_P,      // non-pointwise op with one output requiring a parameter
112     G_TENSORPROD     // general tensor product     G_TENSORPROD     // general tensor product
113  };  };
114    
# Line 108  string ES_opstrings[]={"UNKNOWN","IDENTI Line 120  string ES_opstrings[]={"UNKNOWN","IDENTI
120              "asin","acos","atan","sinh","cosh","tanh","erf",              "asin","acos","atan","sinh","cosh","tanh","erf",
121              "asinh","acosh","atanh",              "asinh","acosh","atanh",
122              "log10","log","sign","abs","neg","pos","exp","sqrt",              "log10","log","sign","abs","neg","pos","exp","sqrt",
123              "1/","where>0","where<0","where>=0","where<=0",              "1/","where>0","where<0","where>=0","where<=0", "where<>0","where=0",
124              "symmetric","nonsymmetric",              "symmetric","nonsymmetric",
125              "prod"};              "prod",
126  int ES_opcount=36;              "transpose", "trace"};
127    int ES_opcount=40;
128  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,  ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
129              G_UNARY,G_UNARY,G_UNARY, //10              G_UNARY,G_UNARY,G_UNARY, //10
130              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 17
131              G_UNARY,G_UNARY,G_UNARY,                    // 20              G_UNARY,G_UNARY,G_UNARY,                    // 20
132              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 28              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 28
133              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,            // 33              G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, G_UNARY_P, G_UNARY_P,      // 35
134              G_NP1OUT,G_NP1OUT,              G_NP1OUT,G_NP1OUT,
135              G_TENSORPROD};              G_TENSORPROD,
136                G_NP1OUT_P, G_NP1OUT_P};
137  inline  inline
138  ES_opgroup  ES_opgroup
139  getOpgroup(ES_optype op)  getOpgroup(ES_optype op)
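The body of getOpgroup falls outside this hunk, but given the two parallel tables above it is presumably a straight index into opgroups. A minimal, self-contained sketch of that lookup pattern (the enums and table below are illustrative stand-ins, not the real escript tables):

    #include <cassert>

    enum OpSketch    { OPS_UNKNOWN=0, OPS_IDENTITY, OPS_ADD, OPS_SIN };        // stand-in for ES_optype
    enum GroupSketch { GRP_UNKNOWN,   GRP_IDENTITY, GRP_BINARY, GRP_UNARY };   // stand-in for ES_opgroup

    // one group entry per operator, in exactly the same order as the op enum
    const GroupSketch groupTable[] = { GRP_UNKNOWN, GRP_IDENTITY, GRP_BINARY, GRP_UNARY };
    const int opcountSketch = sizeof(groupTable)/sizeof(groupTable[0]);

    inline GroupSketch getOpgroupSketch(OpSketch op)
    {
        assert(op >= 0 && op < opcountSketch);   // the tables must stay in sync with the enum
        return groupTable[op];                   // plain parallel-array lookup
    }

The maintenance point is that ES_opstrings, opgroups and ES_opcount must all be updated in lock-step whenever an operator is added, which is what the checklist near the top of the file is reminding you to do.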
# Line 177  resultShape(DataAbstract_ptr left, DataA Line 191  resultShape(DataAbstract_ptr left, DataA
191      return left->getShape();      return left->getShape();
192  }  }
193    
194    // return the shape for "op left"
195    
196    DataTypes::ShapeType
197    resultShape(DataAbstract_ptr left, ES_optype op)
198    {
199        switch(op)
200        {
201            case TRANS:
202            return left->getShape();
203        break;
204        case TRACE:
205            return DataTypes::scalarShape;
206        break;
207            default:
208        throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
209        }
210    }
211    
212  // determine the output shape for the general tensor product operation  // determine the output shape for the general tensor product operation
213  // the additional parameters return information required later for the product  // the additional parameters return information required later for the product
214  // the majority of this code is copy pasted from C_General_Tensor_Product  // the majority of this code is copy pasted from C_General_Tensor_Product
# Line 197  GTPShape(DataAbstract_ptr left, DataAbst Line 229  GTPShape(DataAbstract_ptr left, DataAbst
229    else if (transpose == 2)  { start1 = rank1-axis_offset; }    else if (transpose == 2)  { start1 = rank1-axis_offset; }
230    else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }    else              { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
231    
232      if (rank0<axis_offset)
233      {
234        throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
235      }
236    
237    // Adjust the shapes for transpose    // Adjust the shapes for transpose
238    DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather    DataTypes::ShapeType tmpShape0(rank0);    // pre-sizing the vectors rather
# Line 226  GTPShape(DataAbstract_ptr left, DataAbst Line 262  GTPShape(DataAbstract_ptr left, DataAbst
262       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z       for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
263       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z       for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
264    }    }
265    
266      if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
267      {
268         ostringstream os;
269         os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
270         throw DataException(os.str());
271      }
272    
273    return shape2;    return shape2;
274  }  }
275    
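As a worked example of this shape rule (the transpose==0 branch only): contracting a left operand of shape (2,3) with a right operand of shape (3,4) over one shared axis (axis_offset=1) gives an output shape of (2,4). The sketch below is a self-contained restatement of just that branch using a plain vector for the shape; it is not the escript implementation and skips the transpose handling and the matching-dimension checks:

    #include <cassert>
    #include <vector>

    typedef std::vector<unsigned int> ShapeSketch;

    // transpose==0 branch only: output = (leading rank0-axis_offset dims of left)
    //                                  ++ (trailing rank1-axis_offset dims of right)
    ShapeSketch gtpShapeSketch(const ShapeSketch& left, const ShapeSketch& right,
                               unsigned int axis_offset)
    {
        assert(left.size() >= axis_offset);                     // mirrors the new rank0<axis_offset guard
        ShapeSketch out;
        for (size_t i = 0; i + axis_offset < left.size(); ++i)  out.push_back(left[i]);
        for (size_t i = axis_offset; i < right.size(); ++i)     out.push_back(right[i]);
        return out;
    }

    int main()
    {
        ShapeSketch l, r;
        l.push_back(2); l.push_back(3);                  // left  (2,3)
        r.push_back(3); r.push_back(4);                  // right (3,4)
        ShapeSketch o = gtpShapeSketch(l, r, 1);
        assert(o.size() == 2 && o[0] == 2 && o[1] == 4); // result (2,4)
        return 0;
    }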
# Line 248  GTPShape(DataAbstract_ptr left, DataAbst Line 292  GTPShape(DataAbstract_ptr left, DataAbst
292  // determine the number of sample buffers required to evaluate an expression combining left and right  // determine the number of sample buffers required to evaluate an expression combining left and right
293  // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.  // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.
294  // The same goes for G_TENSORPROD  // The same goes for G_TENSORPROD
295    // It might seem that pointwise binary ops (G_BINARY) could be written over the top of the LHS.
296    // This would be true were it not for the possibility that the LHS could be a scalar which needs to be examined
297    // multiple times.
298  int  int
299  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
300  {  {
301     switch(getOpgroup(op))     switch(getOpgroup(op))
302     {     {
303     case G_IDENTITY: return 1;     case G_IDENTITY: return 1;
304     case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);     case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
305     case G_UNARY: return max(left->getBuffsRequired(),1);     case G_UNARY:
306       case G_UNARY_P: return max(left->getBuffsRequired(),1);
307     case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);     case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);
308       case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);
309     case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);     case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
310     default:     default:
311      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
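To make the buffer counting concrete, here is a throwaway model of just the cases visible in this hunk (identity leaves, unary, binary); for sin(a)+b with ready leaves a and b it gives 3 buffers, i.e. 1 + max(1, 1+1). The Node type and the expression tree are purely illustrative, not escript code:

    #include <algorithm>
    #include <cassert>

    struct Node {
        const Node* left;    // 0 for a leaf (G_IDENTITY)
        const Node* right;   // 0 for leaves and unary nodes
    };

    int buffsSketch(const Node& n)
    {
        if (!n.left)  return 1;                                              // G_IDENTITY
        if (!n.right) return std::max(buffsSketch(*n.left), 1);              // G_UNARY / G_UNARY_P
        return 1 + std::max(buffsSketch(*n.left), buffsSketch(*n.right)+1);  // G_BINARY
    }

    int main()
    {
        Node a = {0, 0}, b = {0, 0};     // ready leaves
        Node s = {&a, 0};                // sin(a)
        Node sum = {&s, &b};             // sin(a)+b
        assert(buffsSketch(sum) == 3);   // 1 + max(1, 1+1)
        return 0;
    }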
# Line 305  DataLazy::DataLazy(DataAbstract_ptr p) Line 354  DataLazy::DataLazy(DataAbstract_ptr p)
354     m_buffsRequired=1;     m_buffsRequired=1;
355     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
356     m_maxsamplesize=m_samplesize;     m_maxsamplesize=m_samplesize;
357  cout << "(1)Lazy created with " << m_samplesize << endl;  LAZYDEBUG(cout << "(1)Lazy created with " << m_samplesize << endl;)
358  }  }
359    
360    
# Line 398  DataLazy::DataLazy(DataAbstract_ptr left Line 447  DataLazy::DataLazy(DataAbstract_ptr left
447     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
448     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());  
449     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_buffsRequired=calcBuffs(m_left, m_right,m_op);
450  cout << "(3)Lazy created with " << m_samplesize << endl;  LAZYDEBUG(cout << "(3)Lazy created with " << m_samplesize << endl;)
451  }  }
452    
453  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
# Line 462  DataLazy::DataLazy(DataAbstract_ptr left Line 511  DataLazy::DataLazy(DataAbstract_ptr left
511     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
512     m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());       m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());  
513     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_buffsRequired=calcBuffs(m_left, m_right,m_op);
514  cout << "(4)Lazy created with " << m_samplesize << endl;  LAZYDEBUG(cout << "(4)Lazy created with " << m_samplesize << endl;)
515  }  }
516    
517    
518    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
519        : parent(left->getFunctionSpace(), resultShape(left,op)),
520        m_op(op),
521        m_axis_offset(axis_offset),
522        m_transpose(0),
523        m_tol(0)
524    {
525       if ((getOpgroup(op)!=G_NP1OUT_P))
526       {
527        throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process NP1OUT operations which require parameters.");
528       }
529       DataLazy_ptr lleft;
530       if (!left->isLazy())
531       {
532        lleft=DataLazy_ptr(new DataLazy(left));
533       }
534       else
535       {
536        lleft=dynamic_pointer_cast<DataLazy>(left);
537       }
538       m_readytype=lleft->m_readytype;
539       m_left=lleft;
540       m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
541       m_samplesize=getNumDPPSample()*getNoValues();
542       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
543    LAZYDEBUG(cout << "(5)Lazy created with " << m_samplesize << endl;)
544    }
545    
546    DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, double tol)
547        : parent(left->getFunctionSpace(), left->getShape()),
548        m_op(op),
549        m_axis_offset(0),
550        m_transpose(0),
551        m_tol(tol)
552    {
553       if ((getOpgroup(op)!=G_UNARY_P))
554       {
555        throw DataException("Programmer error - constructor DataLazy(left, op, tol) will only process UNARY operations which require parameters.");
556       }
557       DataLazy_ptr lleft;
558       if (!left->isLazy())
559       {
560        lleft=DataLazy_ptr(new DataLazy(left));
561       }
562       else
563       {
564        lleft=dynamic_pointer_cast<DataLazy>(left);
565       }
566       m_readytype=lleft->m_readytype;
567       m_left=lleft;
568       m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
569       m_samplesize=getNumDPPSample()*getNoValues();
570       m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
571    LAZYDEBUG(cout << "(6)Lazy created with " << m_samplesize << endl;)
572    }
573    
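Both of the new parameterised constructors repeat the same wrap-or-cast idiom to guarantee that m_left is a lazy node. A stripped-down sketch of that idiom, with placeholder types standing in for DataAbstract/DataLazy (escript's real pointer aliases are boost shared pointers, which is the assumption made here):

    #include <boost/shared_ptr.hpp>

    // Stand-ins for DataAbstract / DataLazy; only the isLazy() distinction matters here.
    struct BaseSketch {
        virtual ~BaseSketch() {}
        virtual bool isLazy() const { return false; }
    };
    struct LazySketch : BaseSketch {
        LazySketch() {}
        explicit LazySketch(boost::shared_ptr<BaseSketch> wrapped) : m_wrapped(wrapped) {}
        bool isLazy() const { return true; }
        boost::shared_ptr<BaseSketch> m_wrapped;   // the wrapped non-lazy child, if any
    };

    // Guarantee a lazy child: wrap a non-lazy argument in an identity-style lazy node,
    // otherwise just downcast the pointer we were given.
    boost::shared_ptr<LazySketch> ensureLazy(boost::shared_ptr<BaseSketch> p)
    {
        if (!p->isLazy())
        {
            return boost::shared_ptr<LazySketch>(new LazySketch(p));
        }
        return boost::dynamic_pointer_cast<LazySketch>(p);
    }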
574  DataLazy::~DataLazy()  DataLazy::~DataLazy()
575  {  {
576  }  }
# Line 602  DataLazy::collapseToReady() Line 707  DataLazy::collapseToReady()
707      case LEZ:      case LEZ:
708      result=left.whereNonPositive();      result=left.whereNonPositive();
709      break;      break;
710        case NEZ:
711        result=left.whereNonZero(m_tol);
712        break;
713        case EZ:
714        result=left.whereZero(m_tol);
715        break;
716      case SYM:      case SYM:
717      result=left.symmetric();      result=left.symmetric();
718      break;      break;
# Line 611  DataLazy::collapseToReady() Line 722  DataLazy::collapseToReady()
722      case PROD:      case PROD:
723      result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);      result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
724      break;      break;
725        case TRANS:
726        result=left.transpose(m_axis_offset);
727        break;
728        case TRACE:
729        result=left.trace(m_axis_offset);
730        break;
731      default:      default:
732      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
733    }    }
# Line 762  DataLazy::resolveUnary(ValueType& v, siz Line 879  DataLazy::resolveUnary(ValueType& v, siz
879      case LEZ:      case LEZ:
880      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));      tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
881      break;      break;
882    // These are actually G_UNARY_P, but I don't see a compelling reason to treat them differently.
883        case NEZ:
884        tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsGT(),m_tol));
885        break;
886        case EZ:
887        tensor_unary_operation(m_samplesize, left, result, bind2nd(AbsLTE(),m_tol));
888        break;
889    
890      default:      default:
891      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
# Line 770  DataLazy::resolveUnary(ValueType& v, siz Line 894  DataLazy::resolveUnary(ValueType& v, siz
894  }  }
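AbsGT and AbsLTE are not defined in this diff; from their use with bind2nd and m_tol they are presumably binary functors that compare |x| against the tolerance and return 1.0 or 0.0. The definitions below are an assumption written to match that usage, not the escript originals:

    #include <algorithm>
    #include <cmath>
    #include <functional>

    // Assumed form of the helpers: |x| > tol (for where<>0) and |x| <= tol (for where=0).
    struct AbsGTSketch : std::binary_function<double,double,double> {
        double operator()(double x, double tol) const { return std::fabs(x) > tol ? 1.0 : 0.0; }
    };
    struct AbsLTESketch : std::binary_function<double,double,double> {
        double operator()(double x, double tol) const { return std::fabs(x) <= tol ? 1.0 : 0.0; }
    };

    int main()
    {
        double in[3]  = {0.0, 1e-12, 0.5};
        double out[3];
        // where<>0 with tol=1e-8: values within the tolerance are treated as zero
        std::transform(in, in+3, out, std::bind2nd(AbsGTSketch(), 1e-8));   // -> 0, 0, 1
        return 0;
    }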
895    
896    
897    
898    
899    
900    
901  /*  /*
902    \brief Compute the value of the expression (unary operation) for the given sample.    \brief Compute the value of the expression (unary operation) for the given sample.
903    \return Vector which stores the value of the subexpression for the given sample.    \return Vector which stores the value of the subexpression for the given sample.
# Line 794  DataLazy::resolveNP1OUT(ValueType& v, si Line 922  DataLazy::resolveNP1OUT(ValueType& v, si
922    }    }
923      // since we can't write the result over the input, we need a result offset further along      // since we can't write the result over the input, we need a result offset further along
924    size_t subroffset=roffset+m_samplesize;    size_t subroffset=roffset+m_samplesize;
925    const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);  LAZYDEBUG(cerr << "subroffset=" << subroffset << endl;)
926      const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);
927    roffset=offset;    roffset=offset;
928      size_t loop=0;
929      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
930      size_t step=getNoValues();
931    switch (m_op)    switch (m_op)
932    {    {
933      case SYM:      case SYM:
934      DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);      for (loop=0;loop<numsteps;++loop)
935        {
936            DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
937            subroffset+=step;
938            offset+=step;
939        }
940      break;      break;
941      case NSYM:      case NSYM:
942      DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);      for (loop=0;loop<numsteps;++loop)
943        {
944            DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
945            subroffset+=step;
946            offset+=step;
947        }
948      break;      break;
949      default:      default:
950      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
# Line 810  DataLazy::resolveNP1OUT(ValueType& v, si Line 952  DataLazy::resolveNP1OUT(ValueType& v, si
952    return &v;    return &v;
953  }  }
954    
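The substantive change in this hunk is that the symmetric/nonsymmetric call is now made once per datapoint rather than once per sample, with both the source and destination offsets advanced by one point's worth of values each time. A self-contained sketch of that loop shape (the inner copy stands in for the DataMaths call, and all names are illustrative):

    #include <cassert>
    #include <vector>

    // Apply a per-point operation to each of the numsteps datapoints in a sample,
    // advancing source and destination offsets by the point size, as resolveNP1OUT now does.
    void applyPerPoint(const std::vector<double>& src, size_t srcoffset,
                       std::vector<double>& dest, size_t destoffset,
                       size_t pointsize, size_t numsteps)
    {
        for (size_t loop = 0; loop < numsteps; ++loop)
        {
            for (size_t i = 0; i < pointsize; ++i)    // stand-in for DataMaths::symmetric etc.
            {
                dest[destoffset + i] = src[srcoffset + i];
            }
            srcoffset  += pointsize;                  // matches subroffset += step
            destoffset += pointsize;                  // matches offset     += step
        }
    }

    int main()
    {
        std::vector<double> src(8, 1.5), dest(8, 0.0);
        applyPerPoint(src, 0, dest, 0, 4, 2);         // 2 datapoints of 4 values each
        assert(dest[7] == 1.5);
        return 0;
    }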
955    /*
956      \brief Compute the value of the expression (non-pointwise operation requiring a parameter) for the given sample.
957      \return Vector which stores the value of the subexpression for the given sample.
958      \param v A vector to store intermediate results.
959      \param offset Index in v to begin storing results.
960      \param sampleNo Sample number to evaluate.
961      \param roffset (output parameter) the offset in the return vector where the result begins.
962    
963      The return value will be an existing vector so do not deallocate it.
964      If the result is stored in v it should be stored at the offset given.
965      Everything from offset to the end of v should be considered available for this method to use.
966    */
967    DataTypes::ValueType*
968    DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
969    {
970        // we assume that any collapsing has been done before we get here
971        // since we only have one argument we don't need to think about only
972        // processing single points.
973      if (m_readytype!='E')
974      {
975        throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");
976      }
977        // since we can't write the result over the input, we need a result offset further along
978      size_t subroffset;
979      const ValueType* vleft=m_left->resolveSample(v,offset+m_left->m_samplesize,sampleNo,subroffset);
980    LAZYDEBUG(cerr << "from=" << offset+m_left->m_samplesize << " to=" << subroffset << " ret=" << roffset << endl;)
981      roffset=offset;
982      size_t loop=0;
983      size_t numsteps=(m_readytype=='E')?getNumDPPSample():1;
984      size_t step=getNoValues();
985      switch (m_op)
986      {
987        case TRACE:
988        for (loop=0;loop<numsteps;++loop)
989        {
990                DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
991            subroffset+=step;
992            offset+=step;
993        }
994        break;
995        case TRANS:
996        for (loop=0;loop<numsteps;++loop)
997        {
998                DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
999            subroffset+=step;
1000            offset+=step;
1001        }
1002        break;
1003        default:
1004        throw DataException("Programmer error - resolveNP1OUT_P can not resolve operator "+opToString(m_op)+".");
1005      }
1006      return &v;
1007    }
1008    
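DataMaths::trace itself is outside this diff; for a rank-2 point it presumably sums the diagonal, which is consistent with resultShape returning scalarShape for TRACE. A minimal rank-2 illustration under that assumption (row-major point layout is also assumed; this is not the escript routine):

    #include <cassert>

    // Assumed behaviour for the rank-2 case only: contract the two axes,
    // i.e. sum A(i,i), producing one scalar per datapoint.
    double trace2Sketch(const double* point, unsigned int n)   // point is an n x n block, row-major
    {
        double sum = 0.0;
        for (unsigned int i = 0; i < n; ++i)
        {
            sum += point[i*n + i];
        }
        return sum;
    }

    int main()
    {
        double a[4] = {1.0, 2.0, 3.0, 4.0};    // [[1,2],[3,4]]
        assert(trace2Sketch(a, 2) == 5.0);
        return 0;
    }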
1009    
1010  #define PROC_OP(TYPE,X)                               \  #define PROC_OP(TYPE,X)                               \
1011      for (int i=0;i<steps;++i,resultp+=resultStep) \      for (int j=0;j<onumsteps;++j)\
1012      { \      {\
1013         tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \        for (int i=0;i<numsteps;++i,resultp+=resultStep) \
1014         lroffset+=leftStep; \        { \
1015         rroffset+=rightStep; \  LAZYDEBUG(cout << "[left,right]=[" << lroffset << "," << rroffset << "]" << endl;)\
1016    LAZYDEBUG(cout << "{left,right}={" << (*left)[lroffset] << "," << (*right)[rroffset] << "}\n";)\
1017             tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
1018    LAZYDEBUG(cout << " result=      " << resultp[0] << endl;) \
1019             lroffset+=leftstep; \
1020             rroffset+=rightstep; \
1021          }\
1022          lroffset+=oleftstep;\
1023          rroffset+=orightstep;\
1024      }      }
1025    
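The reworked PROC_OP is easier to follow as an ordinary function. Below is a hedged, self-contained restatement of the same traversal, with plain addition standing in for the operation and raw pointers standing in for the Data vectors; signed offsets are used so the negative outer steps (oleftstep/orightstep) behave predictably in the sketch:

    void procOpAddSketch(const double* left, const double* right, double* resultp,
                         size_t chunksize, int numsteps, int onumsteps,
                         int leftstep, int rightstep, int oleftstep, int orightstep,
                         int resultStep)
    {
        long lroffset = 0, rroffset = 0;           // signed so negative outer steps can rewind
        for (int j = 0; j < onumsteps; ++j)        // outer loop: only needed for extended scalars
        {
            for (int i = 0; i < numsteps; ++i, resultp += resultStep)
            {
                for (long k = 0; k < (long)chunksize; ++k)     // tensor_binary_operation
                {
                    resultp[k] = left[lroffset + k] + right[rroffset + k];
                }
                lroffset += leftstep;
                rroffset += rightstep;
            }
            lroffset += oleftstep;                 // rewind or advance for the next outer pass
            rroffset += orightstep;
        }
    }

    int main()
    {
        // Expanded non-scalar left (4 values, 1 datapoint) + single scalar right:
        // chunksize=1, numsteps=4, leftstep=1, rightstep=0, resultStep=1, no outer loop.
        double l[4] = {1, 2, 3, 4}, r[1] = {10}, out[4];
        procOpAddSketch(l, r, out, 1, 4, 1, 1, 0, 0, 0, 1);
        // out is now {11, 12, 13, 14}
        return 0;
    }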
1026  /*  /*
# Line 845  DataLazy::resolveNP1OUT(ValueType& v, si Line 1047  DataLazy::resolveNP1OUT(ValueType& v, si
1047  DataTypes::ValueType*  DataTypes::ValueType*
1048  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveBinary(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const
1049  {  {
1050  cout << "Resolve binary: " << toString() << endl;  LAZYDEBUG(cout << "Resolve binary: " << toString() << endl;)
1051    
1052    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors
1053      // first work out which of the children are expanded      // first work out which of the children are expanded
1054    bool leftExp=(m_left->m_readytype=='E');    bool leftExp=(m_left->m_readytype=='E');
1055    bool rightExp=(m_right->m_readytype=='E');    bool rightExp=(m_right->m_readytype=='E');
1056    bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step?    if (!leftExp && !rightExp)
1057    int steps=(bigloops?1:getNumDPPSample());    {
1058    size_t chunksize=(bigloops? m_samplesize : getNoValues());    // if bigloops, pretend the whole sample is a datapoint      throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
1059    if (m_left->getRank()!=m_right->getRank())    // need to deal with scalar * ? ops    }
1060    {    bool leftScalar=(m_left->getRank()==0);
1061      EsysAssert((m_left->getRank()==0) || (m_right->getRank()==0), "Error - Ranks must match unless one is 0.");    bool rightScalar=(m_right->getRank()==0);
1062      steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());    if ((m_left->getRank()!=m_right->getRank()) && (!leftScalar && !rightScalar))
1063      chunksize=1;    // for scalar    {
1064    }          throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
1065    int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);    }
1066    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);    size_t leftsize=m_left->getNoValues();
1067    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    size_t rightsize=m_right->getNoValues();
1068      size_t chunksize=1;           // how many doubles will be processed in one go
1069      int leftstep=0;       // how far should the left offset advance after each step
1070      int rightstep=0;
1071      int numsteps=0;       // total number of steps for the inner loop
1072      int oleftstep=0;  // the o variables refer to the outer loop
1073      int orightstep=0; // The outer loop is only required in cases where there is an extended scalar
1074      int onumsteps=1;
1075      
1076      bool LES=(leftExp && leftScalar); // Left is an expanded scalar
1077      bool RES=(rightExp && rightScalar);
1078      bool LS=(!leftExp && leftScalar); // left is a single scalar
1079      bool RS=(!rightExp && rightScalar);
1080      bool LN=(!leftExp && !leftScalar);    // left is a single non-scalar
1081      bool RN=(!rightExp && !rightScalar);
1082      bool LEN=(leftExp && !leftScalar);    // left is an expanded non-scalar
1083      bool REN=(rightExp && !rightScalar);
1084    
1085      if ((LES && RES) || (LEN && REN)) // both are Expanded scalars or both are expanded non-scalars
1086      {
1087        chunksize=m_left->getNumDPPSample()*leftsize;
1088        leftstep=0;
1089        rightstep=0;
1090        numsteps=1;
1091      }
1092      else if (LES || RES)
1093      {
1094        chunksize=1;
1095        if (LES)        // left is an expanded scalar
1096        {
1097            if (RS)
1098            {
1099               leftstep=1;
1100               rightstep=0;
1101               numsteps=m_left->getNumDPPSample();
1102            }
1103            else        // RN or REN
1104            {
1105               leftstep=0;
1106               oleftstep=1;
1107               rightstep=1;
1108               orightstep=(RN?-rightsize:0);
1109               numsteps=rightsize;
1110               onumsteps=m_left->getNumDPPSample();
1111            }
1112        }
1113        else        // right is an expanded scalar
1114        {
1115            if (LS)
1116            {
1117               rightstep=1;
1118               leftstep=0;
1119               numsteps=m_right->getNumDPPSample();
1120            }
1121            else
1122            {
1123               rightstep=0;
1124               orightstep=1;
1125               leftstep=1;
1126               oleftstep=(LN?-leftsize:0);
1127               numsteps=leftsize;
1128               onumsteps=m_right->getNumDPPSample();
1129            }
1130        }
1131      }
1132      else  // this leaves (LEN, RS), (LEN, RN) and their transposes
1133      {
1134        if (LEN)    // and Right will be a single value
1135        {
1136            chunksize=rightsize;
1137            leftstep=rightsize;
1138            rightstep=0;
1139            numsteps=m_left->getNumDPPSample();
1140            if (RS)
1141            {
1142               numsteps*=leftsize;
1143            }
1144        }
1145        else    // REN
1146        {
1147            chunksize=leftsize;
1148            rightstep=leftsize;
1149            leftstep=0;
1150            numsteps=m_right->getNumDPPSample();
1151            if (LS)
1152            {
1153               numsteps*=rightsize;
1154            }
1155        }
1156      }
1157    
1158      int resultStep=max(leftstep,rightstep);   // only one (at most) should be !=0
1159      // Get the values of sub-expressions      // Get the values of sub-expressions
1160    const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);    const ValueType* left=m_left->resolveSample(v,offset+getMaxSampleSize(),sampleNo,lroffset);   // see note on
1161    const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note      // calcBuffs for why we can't put offset as the 2nd param above
1162      const ValueType* right=m_right->resolveSample(v,offset+2*getMaxSampleSize(),sampleNo,rroffset); // Note
1163      // the right child starts further along.      // the right child starts further along.
1164    LAZYDEBUG(cout << "Post sub calls in " << toString() << endl;)
1165    LAZYDEBUG(cout << "shapes=" << DataTypes::shapeToString(m_left->getShape()) << "," << DataTypes::shapeToString(m_right->getShape()) << endl;)
1166    LAZYDEBUG(cout << "chunksize=" << chunksize << endl << "leftstep=" << leftstep << " rightstep=" << rightstep;)
1167    LAZYDEBUG(cout << " numsteps=" << numsteps << endl << "oleftstep=" << oleftstep << " orightstep=" << orightstep;)
1168    LAZYDEBUG(cout << "onumsteps=" << onumsteps << endl;)
1169    LAZYDEBUG(cout << " DPPS=" << m_left->getNumDPPSample() << "," <<m_right->getNumDPPSample() << endl;)
1170    LAZYDEBUG(cout << "" << LS << RS << LN << RN << LES << RES <<LEN << REN <<   endl;)
1171    double* resultp=&(v[offset]);     // results are stored at the vector offset we received    double* resultp=&(v[offset]);     // results are stored at the vector offset we received
1172    switch(m_op)    switch(m_op)
1173    {    {
# Line 893  cout << "Resolve binary: " << toString() Line 1194  cout << "Resolve binary: " << toString()
1194  }  }
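A worked example of the classification above may help. Suppose the left child is a single (unexpanded) non-scalar with 3 values per point (LN) and the right child is an expanded scalar with 2 datapoints per sample (RES). The RES branch then yields:

    chunksize  = 1
    leftstep   = 1      rightstep  = 0
    oleftstep  = -3     orightstep = 1      (rewind the left, move to the scalar's next datapoint)
    numsteps   = 3      onumsteps  = 2
    resultStep = max(leftstep, rightstep) = 1

so the inner loop pairs each of the three left values with the current right value, and the outer loop repeats that for each of the scalar's datapoints, writing 6 results in total. This is a reading of the code above, not an authoritative trace.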
1195    
1196    
1197    
1198  /*  /*
1199    \brief Compute the value of the expression (tensor product) for the given sample.    \brief Compute the value of the expression (tensor product) for the given sample.
1200    \return Vector which stores the value of the subexpression for the given sample.    \return Vector which stores the value of the subexpression for the given sample.
# Line 911  cout << "Resolve binary: " << toString() Line 1213  cout << "Resolve binary: " << toString()
1213  DataTypes::ValueType*  DataTypes::ValueType*
1214  DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const  DataLazy::resolveTProd(ValueType& v,  size_t offset, int sampleNo, size_t& roffset) const
1215  {  {
1216  cout << "Resolve TensorProduct: " << toString() << endl;  LAZYDEBUG(cout << "Resolve TensorProduct: " << toString() << endl;)
1217    
1218    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors    size_t lroffset=0, rroffset=0;    // offsets in the left and right result vectors
1219      // first work out which of the children are expanded      // first work out which of the children are expanded
# Line 922  cout << "Resolve TensorProduct: " << toS Line 1224  cout << "Resolve TensorProduct: " << toS
1224    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);    int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);
1225    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0    int resultStep=max(leftStep,rightStep);   // only one (at most) should be !=0
1226      // Get the values of sub-expressions (leave a gap of one sample for the result).      // Get the values of sub-expressions (leave a gap of one sample for the result).
1227    const ValueType* left=m_left->resolveSample(v,offset+m_samplesize,sampleNo,lroffset);    int gap=offset+m_left->getMaxSampleSize();    // actually only needs to be m_left->m_samplesize
1228    const ValueType* right=m_right->resolveSample(v,offset+2*m_samplesize,sampleNo,rroffset);    const ValueType* left=m_left->resolveSample(v,gap,sampleNo,lroffset);
1229      gap+=m_right->getMaxSampleSize();
1230      const ValueType* right=m_right->resolveSample(v,gap,sampleNo,rroffset);
1231    LAZYDEBUG(cout << "Post sub calls: " << toString() << endl;)
1232    LAZYDEBUG(cout << "LeftExp=" << leftExp << " rightExp=" << rightExp << endl;)
1233    LAZYDEBUG(cout << "LeftR=" << m_left->getRank() << " rightExp=" << m_right->getRank() << endl;)
1234    LAZYDEBUG(cout << "LeftSize=" << m_left->getNoValues() << " RightSize=" << m_right->getNoValues() << endl;)
1235    LAZYDEBUG(cout << "m_samplesize=" << m_samplesize << endl;)
1236    LAZYDEBUG(cout << "outputshape=" << DataTypes::shapeToString(getShape()) << endl;)
1237    double* resultp=&(v[offset]);     // results are stored at the vector offset we received    double* resultp=&(v[offset]);     // results are stored at the vector offset we received
1238    switch(m_op)    switch(m_op)
1239    {    {
1240      case PROD:      case PROD:
1241      for (int i=0;i<steps;++i,resultp+=resultStep)      for (int i=0;i<steps;++i,resultp+=resultStep)
1242      {      {
1243    LAZYDEBUG(cout << "lroffset=" << lroffset << "rroffset=" << rroffset << endl;)
1244    LAZYDEBUG(cout << "l*=" << left << " r*=" << right << endl;)
1245    LAZYDEBUG(cout << "m_SL=" << m_SL << " m_SM=" << m_SM << " m_SR=" << m_SR << endl;)
1246            const double *ptr_0 = &((*left)[lroffset]);            const double *ptr_0 = &((*left)[lroffset]);
1247            const double *ptr_1 = &((*right)[rroffset]);            const double *ptr_1 = &((*right)[rroffset]);
1248    LAZYDEBUG(cout << DataTypes::pointToString(*left, m_left->getShape(),lroffset,"LEFT") << endl;)
1249    LAZYDEBUG(cout << DataTypes::pointToString(*right,m_right->getShape(),rroffset, "RIGHT") << endl;)
1250            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);            matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1251        lroffset+=leftStep;        lroffset+=leftStep;
1252        rroffset+=rightStep;        rroffset+=rightStep;
# Line 965  cout << "Resolve TensorProduct: " << toS Line 1280  cout << "Resolve TensorProduct: " << toS
1280  const DataTypes::ValueType*  const DataTypes::ValueType*
1281  DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)  DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
1282  {  {
1283  cout << "Resolve sample " << toString() << endl;  LAZYDEBUG(cout << "Resolve sample " << toString() << endl;)
1284      // collapse so we have a 'E' node or an IDENTITY for some other type      // collapse so we have a 'E' node or an IDENTITY for some other type
1285    if (m_readytype!='E' && m_op!=IDENTITY)    if (m_readytype!='E' && m_op!=IDENTITY)
1286    {    {
# Line 977  cout << "Resolve sample " << toString() Line 1292  cout << "Resolve sample " << toString()
1292      if (m_readytype=='C')      if (m_readytype=='C')
1293      {      {
1294      roffset=0;      roffset=0;
1295    LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)
1296      return &(vec);      return &(vec);
1297      }      }
1298      roffset=m_id->getPointOffset(sampleNo, 0);      roffset=m_id->getPointOffset(sampleNo, 0);
1299    LAZYDEBUG(cout << "Finish  sample " << toString() << endl;)
1300      return &(vec);      return &(vec);
1301    }    }
1302    if (m_readytype!='E')    if (m_readytype!='E')
# Line 988  cout << "Resolve sample " << toString() Line 1305  cout << "Resolve sample " << toString()
1305    }    }
1306    switch (getOpgroup(m_op))    switch (getOpgroup(m_op))
1307    {    {
1308    case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);    case G_UNARY:
1309      case G_UNARY_P: return resolveUnary(v, offset,sampleNo,roffset);
1310    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);    case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
1311    case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);    case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);
1312      case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);
1313    case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);    case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);
1314    default:    default:
1315      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");      throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1316    }    }
1317    
1318  }  }
1319    
1320    
# Line 1004  DataReady_ptr Line 1324  DataReady_ptr
1324  DataLazy::resolve()  DataLazy::resolve()
1325  {  {
1326    
1327  cout << "Sample size=" << m_samplesize << endl;  LAZYDEBUG(cout << "Sample size=" << m_samplesize << endl;)
1328  cout << "Buffers=" << m_buffsRequired << endl;  LAZYDEBUG(cout << "Buffers=" << m_buffsRequired << endl;)
1329    
1330    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally    if (m_readytype!='E')     // if the whole sub-expression is Constant or Tagged, then evaluate it normally
1331    {    {
# Line 1021  cout << "Buffers=" << m_buffsRequired << Line 1341  cout << "Buffers=" << m_buffsRequired <<
1341    int numthreads=1;    int numthreads=1;
1342  #ifdef _OPENMP  #ifdef _OPENMP
1343    numthreads=getNumberOfThreads();    numthreads=getNumberOfThreads();
   int threadnum=0;  
1344  #endif  #endif
1345    ValueType v(numthreads*threadbuffersize);    ValueType v(numthreads*threadbuffersize);
1346  cout << "Buffer created with size=" << v.size() << endl;  LAZYDEBUG(cout << "Buffer created with size=" << v.size() << endl;)
1347    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
1348    ValueType& resvec=result->getVector();    ValueType& resvec=result->getVector();
1349    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
# Line 1033  cout << "Buffer created with size=" << v Line 1352  cout << "Buffer created with size=" << v
1352    int totalsamples=getNumSamples();    int totalsamples=getNumSamples();
1353    const ValueType* res=0;   // Vector storing the answer    const ValueType* res=0;   // Vector storing the answer
1354    size_t resoffset=0;       // where in the vector to find the answer    size_t resoffset=0;       // where in the vector to find the answer
1355    #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)  LAZYDEBUG(cout << "Total number of samples=" <<totalsamples << endl;)
1356      #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)
1357    for (sample=0;sample<totalsamples;++sample)    for (sample=0;sample<totalsamples;++sample)
1358    {    {
1359  cout << "################################# " << sample << endl;        if (sample==0)  {ENABLEDEBUG}
1360    LAZYDEBUG(cout << "################################# " << sample << endl;)
1361  #ifdef _OPENMP  #ifdef _OPENMP
1362      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
1363  #else  #else
1364      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.
1365  #endif  #endif
1366  cerr << "-------------------------------- " << endl;  LAZYDEBUG(cerr << "-------------------------------- " << endl;)
1367    LAZYDEBUG(cerr<< "Copying sample#" << sample << endl;)
1368      outoffset=result->getPointOffset(sample,0);      outoffset=result->getPointOffset(sample,0);
1369  cerr << "offset=" << outoffset << endl;  LAZYDEBUG(cerr << "offset=" << outoffset << " from offset=" << resoffset << " " << m_samplesize << " doubles" << endl;)
1370      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector
1371      {      {
1372    // LAZYDEBUG(cerr << "outoffset=" << outoffset << " resoffset=" << resoffset << endl;)
1373      resvec[outoffset]=(*res)[resoffset];      resvec[outoffset]=(*res)[resoffset];
1374      }      }
1375  cerr << "*********************************" << endl;  LAZYDEBUG(cerr << DataTypes::pointToString(resvec,getShape(),outoffset-m_samplesize+DataTypes::noValues(getShape()),"Final result:") << endl;)
1376    LAZYDEBUG(cerr << "*********************************" << endl;)
1377        DISABLEDEBUG
1378    }    }
1379    return resptr;    return resptr;
1380  }  }
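The parallel section above gives each OpenMP thread its own slab of the shared buffer vector, selected with threadbuffersize*omp_get_thread_num(), so samples can be resolved without locking. A self-contained model of that layout (the sizing of threadbuffersize below is a guess at the general idea, not the exact expression DataLazy uses, and the per-sample work is a trivial fill):

    #include <vector>
    #ifdef _OPENMP
    #include <omp.h>
    #endif

    int main()
    {
        const size_t samplesize = 16;                       // doubles per sample (illustrative)
        const size_t buffsRequired = 3;                     // as reported by calcBuffs (illustrative)
        const size_t threadbuffersize = samplesize * (buffsRequired + 1);   // assumed sizing idea
        int numthreads = 1;
    #ifdef _OPENMP
        numthreads = omp_get_max_threads();
    #endif
        std::vector<double> v(numthreads * threadbuffersize);

        const int totalsamples = 100;
    #pragma omp parallel for schedule(static)
        for (int sample = 0; sample < totalsamples; ++sample)
        {
            size_t base = 0;                                // this thread's private slab
    #ifdef _OPENMP
            base = threadbuffersize * omp_get_thread_num();
    #endif
            for (size_t i = 0; i < samplesize; ++i)
            {
                v[base + i] = sample;                       // stands in for resolveSample + copy-out
            }
        }
        return 0;
    }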
# Line 1096  DataLazy::intoString(ostringstream& oss) Line 1421  DataLazy::intoString(ostringstream& oss)
1421      oss << ')';      oss << ')';
1422      break;      break;
1423    case G_UNARY:    case G_UNARY:
1424      case G_UNARY_P:
1425    case G_NP1OUT:    case G_NP1OUT:
1426      case G_NP1OUT_P:
1427      oss << opToString(m_op) << '(';      oss << opToString(m_op) << '(';
1428      m_left->intoString(oss);      m_left->intoString(oss);
1429      oss << ')';      oss << ')';
