/[escript]/trunk/escript/src/DataLazy.cpp
ViewVC logotype

Diff of /trunk/escript/src/DataLazy.cpp

Parent Directory Parent Directory | Revision Log Revision Log | View Patch Patch

revision 1879 by jfenwick, Tue Oct 14 03:54:42 2008 UTC revision 1898 by jfenwick, Mon Oct 20 01:20:18 2008 UTC
# Line 19  Line 19 
19  #ifdef PASO_MPI  #ifdef PASO_MPI
20  #include <mpi.h>  #include <mpi.h>
21  #endif  #endif
22    #ifdef _OPENMP
23    #include <omp.h>
24    #endif
25  #include "FunctionSpace.h"  #include "FunctionSpace.h"
26  #include "DataTypes.h"  #include "DataTypes.h"
27  #include "Data.h"  #include "Data.h"
28    #include "UnaryFuncs.h"     // for escript::fsign
29    
30  using namespace std;  using namespace std;
31  using namespace boost;  using namespace boost;
# Line 35  opToString(ES_optype op); Line 39  opToString(ES_optype op);
39  namespace  namespace
40  {  {
41    
42    
43    
44    enum ES_opgroup
45    {
46       G_UNKNOWN,
47       G_IDENTITY,
48       G_BINARY,
49       G_UNARY
50    };
51    
52    
53    
54    
55    string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","sin","cos","tan",
56                "asin","acos","atan","sinh","cosh","tanh","erf",
57                "asinh","acosh","atanh",
58                "log10","log","sign","abs","neg","pos","exp","sqrt",
59                "1/","where>0","where<0","where>=0","where<=0"};
60    int ES_opcount=32;
61    ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY,G_UNARY,G_UNARY,G_UNARY, //9
62                G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,    // 16
63                G_UNARY,G_UNARY,G_UNARY,                    // 19
64                G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,        // 27
65                G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY};
66    inline
67    ES_opgroup
68    getOpgroup(ES_optype op)
69    {
70      return opgroups[op];
71    }
72    
73  // return the FunctionSpace of the result of "left op right"  // return the FunctionSpace of the result of "left op right"
74  FunctionSpace  FunctionSpace
75  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
# Line 66  resultShape(DataAbstract_ptr left, DataA Line 101  resultShape(DataAbstract_ptr left, DataA
101  size_t  size_t
102  resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
103  {  {
104     switch(op)     switch (getOpgroup(op))
105     {     {
106  //   case IDENTITY: return left->getLength();     case G_BINARY: return left->getLength();
107     case ADD:    // the length is preserved in these ops     case G_UNARY: return left->getLength();
    case SUB:  
    case MUL:  
    case DIV: return left->getLength();  
108     default:     default:
109      throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");      throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");
   
110     }     }
111  }  }
112    
113  int  int
114  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)  calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
115  {  {
116     switch(op)     switch(getOpgroup(op))
117     {     {
118     case IDENTITY: return 0;     case G_IDENTITY: return 1;
119     case ADD:    // the length is preserved in these ops     case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);
120     case SUB:     case G_UNARY: return max(left->getBuffsRequired(),1);
    case MUL:  
    case DIV: return max(left->getBuffsRequired(),right->getBuffsRequired());  
121     default:     default:
122      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");      throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
123     }     }
124  }  }
125    
 string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/"};  
 int ES_opcount=5;  
   
 // void performOp(ValueType& v, int startOffset, ES_optype op, int m_samplesize)  
 // {  
 //    switch(op)  
 //    {  
 //    case ADD:  DataMaths::binaryOp(v,getShape(),startOffset,v,getShape(),  
 //      startOffset+m_samplesize,plus<double>());  
 //        break;      
 //    case SUB:  DataMaths::binaryOp(v,getShape(),startOffset,v,getShape(),  
 //      startOffset+m_samplesize,minus<double>());  
 //        break;  
 //    case MUL:  DataMaths::binaryOp(v,getShape(),startOffset,v,getShape(),  
 //      startOffset+m_samplesize,multiplies<double>());  
 //        break;  
 //    case DIV:  DataMaths::binaryOp(v,getShape(),startOffset,v,getShape(),  
 //      startOffset+m_samplesize,divides<double>());  
 //        break;  
 //    default:  
 //  throw DataException("Programmer Error - attempt to performOp() for operator "+opToString(op)+".");  
 //    }  
 //  
 // }  
   
126  }   // end anonymous namespace  }   // end anonymous namespace
127    
128    
# Line 148  DataLazy::DataLazy(DataAbstract_ptr p) Line 152  DataLazy::DataLazy(DataAbstract_ptr p)
152     else     else
153     {     {
154      m_id=dynamic_pointer_cast<DataReady>(p);      m_id=dynamic_pointer_cast<DataReady>(p);
155        if(p->isConstant()) {m_readytype='C';}
156        else if(p->isExpanded()) {m_readytype='E';}
157        else if (p->isTagged()) {m_readytype='T';}
158        else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}
159     }     }
160     m_length=p->getLength();     m_length=p->getLength();
161     m_buffsRequired=0;     m_buffsRequired=1;
162     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
163  cout << "(1)Lazy created with " << m_samplesize << endl;  cout << "(1)Lazy created with " << m_samplesize << endl;
164  }  }
165    
166  DataLazy::DataLazy(DataLazy_ptr left, DataLazy_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
167      : parent(resultFS(left,right,op), resultShape(left,right,op)),      : parent(left->getFunctionSpace(),left->getShape()),
     m_left(left),  
     m_right(right),  
168      m_op(op)      m_op(op)
169  {  {
170     m_length=resultLength(m_left,m_right,m_op);     if (getOpgroup(op)!=G_UNARY)
171       {
172        throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
173       }
174       DataLazy_ptr lleft;
175       if (!left->isLazy())
176       {
177        lleft=DataLazy_ptr(new DataLazy(left));
178       }
179       else
180       {
181        lleft=dynamic_pointer_cast<DataLazy>(left);
182       }
183       m_readytype=lleft->m_readytype;
184       m_length=left->getLength();
185       m_left=lleft;
186       m_buffsRequired=1;
187     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
    m_buffsRequired=calcBuffs(m_left, m_right, m_op);  
 cout << "(2)Lazy created with " << m_samplesize << endl;  
188  }  }
189    
190    
191    // DataLazy::DataLazy(DataLazy_ptr left, DataLazy_ptr right, ES_optype op)
192    //  : parent(resultFS(left,right,op), resultShape(left,right,op)),
193    //  m_left(left),
194    //  m_right(right),
195    //  m_op(op)
196    // {
197    //    if (getOpgroup(op)!=G_BINARY)
198    //    {
199    //  throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
200    //    }
201    //    m_length=resultLength(m_left,m_right,m_op);
202    //    m_samplesize=getNumDPPSample()*getNoValues();
203    //    m_buffsRequired=calcBuffs(m_left, m_right, m_op);
204    // cout << "(2)Lazy created with " << m_samplesize << endl;
205    // }
206    
207  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)  DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
208      : parent(resultFS(left,right,op), resultShape(left,right,op)),      : parent(resultFS(left,right,op), resultShape(left,right,op)),
209      m_op(op)      m_op(op)
210  {  {
211       if (getOpgroup(op)!=G_BINARY)
212       {
213        throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
214       }
215     if (left->isLazy())     if (left->isLazy())
216     {     {
217      m_left=dynamic_pointer_cast<DataLazy>(left);      m_left=dynamic_pointer_cast<DataLazy>(left);
# Line 187  DataLazy::DataLazy(DataAbstract_ptr left Line 228  DataLazy::DataLazy(DataAbstract_ptr left
228     {     {
229      m_right=DataLazy_ptr(new DataLazy(right));      m_right=DataLazy_ptr(new DataLazy(right));
230     }     }
231       char lt=m_left->m_readytype;
232       char rt=m_right->m_readytype;
233       if (lt=='E' || rt=='E')
234       {
235        m_readytype='E';
236       }
237       else if (lt=='T' || rt=='T')
238       {
239        m_readytype='T';
240       }
241       else
242       {
243        m_readytype='C';
244       }
245     m_length=resultLength(m_left,m_right,m_op);     m_length=resultLength(m_left,m_right,m_op);
246     m_samplesize=getNumDPPSample()*getNoValues();     m_samplesize=getNumDPPSample()*getNoValues();
247     m_buffsRequired=calcBuffs(m_left, m_right,m_op);     m_buffsRequired=calcBuffs(m_left, m_right,m_op);
# Line 206  DataLazy::getBuffsRequired() const Line 260  DataLazy::getBuffsRequired() const
260      return m_buffsRequired;      return m_buffsRequired;
261  }  }
262    
263  void  
264  DataLazy::resolveSample(ValueType& v,int sampleNo,  size_t offset ) const  DataReady_ptr
265    DataLazy::collapseToReady()
266  {  {
267    if (m_op==IDENTITY)   // copy the contents into the vector    if (m_readytype=='E')
268      { // this is more an efficiency concern than anything else
269        throw DataException("Programmer Error - do not use collapse on Expanded data.");
270      }
271      if (m_op==IDENTITY)
272    {    {
273  cout << "Begin ID" << endl;      return m_id;
 cout << "dpps=" << getNumDPPSample() << " novals=" << getNoValues() << endl;  
     const ValueType& vec=m_id->getVector();  
     size_t srcOffset=m_id->getPointOffset(sampleNo, 0);  
 cout << "v.size()=" << v.size() << " vec=" << vec.size() << endl;  
     for (size_t i=0;i<m_samplesize;++i,++srcOffset,++offset)  
     {  
 cout << "Trying offset=" << offset << " srcOffset=" << srcOffset << endl;  
     v[offset]=vec[srcOffset];    
     }  
 cout << "End ID" << endl;  
     return;  
274    }    }
275    size_t rightoffset=offset+m_samplesize;    DataReady_ptr pleft=m_left->collapseToReady();
276    m_left->resolveSample(v,sampleNo,offset);    Data left(pleft);
277    m_right->resolveSample(v,sampleNo,rightoffset);    Data right;
278  //  for (int i=0;i<getNumDPPSample();++i)    if (getOpgroup(m_op)==G_BINARY)
279    {    {
280      switch(m_op)      right=Data(m_right->collapseToReady());
281      {    }
282      case ADD:       // since these are pointwise ops, pretend each sample is one point    Data result;
283      tensor_binary_operation(m_samplesize,&(v[offset]),&(v[rightoffset]),&(v[offset]),plus<double>());    switch(m_op)
284      {
285        case ADD:
286        result=left+right;
287      break;      break;
288      case SUB:            case SUB:      
289      tensor_binary_operation(m_samplesize,&(v[offset]),&(v[rightoffset]),&(v[offset]),minus<double>());      result=left-right;
290      break;      break;
291      case MUL:            case MUL:      
292      tensor_binary_operation(m_samplesize,&(v[offset]),&(v[rightoffset]),&(v[offset]),multiplies<double>());      result=left*right;
293      break;      break;
294      case DIV:            case DIV:      
295      tensor_binary_operation(m_samplesize,&(v[offset]),&(v[rightoffset]),&(v[offset]),divides<double>());      result=left/right;
296        break;
297        case SIN:
298        result=left.sin();  
299        break;
300        case COS:
301        result=left.cos();
302        break;
303        case TAN:
304        result=left.tan();
305        break;
306        case ASIN:
307        result=left.asin();
308        break;
309        case ACOS:
310        result=left.acos();
311        break;
312        case ATAN:
313        result=left.atan();
314        break;
315        case SINH:
316        result=left.sinh();
317        break;
318        case COSH:
319        result=left.cosh();
320        break;
321        case TANH:
322        result=left.tanh();
323        break;
324        case ERF:
325        result=left.erf();
326        break;
327       case ASINH:
328        result=left.asinh();
329        break;
330       case ACOSH:
331        result=left.acosh();
332        break;
333       case ATANH:
334        result=left.atanh();
335        break;
336        case LOG10:
337        result=left.log10();
338        break;
339        case LOG:
340        result=left.log();
341        break;
342        case SIGN:
343        result=left.sign();
344        break;
345        case ABS:
346        result=left.abs();
347        break;
348        case NEG:
349        result=left.neg();
350        break;
351        case POS:
352        // it doesn't mean anything for delayed.
353        // it will just trigger a deep copy of the lazy object
354        throw DataException("Programmer error - POS not supported for lazy data.");
355        break;
356        case EXP:
357        result=left.exp();
358        break;
359        case SQRT:
360        result=left.sqrt();
361        break;
362        case RECIP:
363        result=left.oneOver();
364        break;
365        case GZ:
366        result=left.wherePositive();
367        break;
368        case LZ:
369        result=left.whereNegative();
370        break;
371        case GEZ:
372        result=left.whereNonNegative();
373        break;
374        case LEZ:
375        result=left.whereNonPositive();
376      break;      break;
377      default:      default:
378      throw DataException("Programmer error - do not know how to resolve operator "+opToString(m_op)+".");      throw DataException("Programmer error - do not know how to resolve operator "+opToString(m_op)+".");
379      }
380      return result.borrowReadyPtr();
381    }
382    
383    void
384    DataLazy::collapse()
385    {
386      if (m_op==IDENTITY)
387      {
388        return;
389      }
390      if (m_readytype=='E')
391      { // this is more an efficiency concern than anything else
392        throw DataException("Programmer Error - do not use collapse on Expanded data.");
393      }
394      m_id=collapseToReady();
395      m_op=IDENTITY;
396    }
397    
398    DataTypes::ValueType*
399    DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
400    {
401        // we assume that any collapsing has been done before we get here
402        // since we only have one argument we don't need to think about only
403        // processing single points.
404      if (m_readytype!='E')
405      {
406        throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
407      }
408      const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);
409      const double* left=&((*vleft)[roffset]);
410      double* result=&(v[offset]);
411      roffset=offset;
412      switch (m_op)
413      {
414        case SIN:  
415        tensor_unary_operation(m_samplesize, left, result, ::sin);
416        break;
417        case COS:
418        tensor_unary_operation(m_samplesize, left, result, ::cos);
419        break;
420        case TAN:
421        tensor_unary_operation(m_samplesize, left, result, ::tan);
422        break;
423        case ASIN:
424        tensor_unary_operation(m_samplesize, left, result, ::asin);
425        break;
426        case ACOS:
427        tensor_unary_operation(m_samplesize, left, result, ::acos);
428        break;
429        case ATAN:
430        tensor_unary_operation(m_samplesize, left, result, ::atan);
431        break;
432        case SINH:
433        tensor_unary_operation(m_samplesize, left, result, ::sinh);
434        break;
435        case COSH:
436        tensor_unary_operation(m_samplesize, left, result, ::cosh);
437        break;
438        case TANH:
439        tensor_unary_operation(m_samplesize, left, result, ::tanh);
440        break;
441        case ERF:
442    #ifdef _WIN32
443        throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
444    #else
445        tensor_unary_operation(m_samplesize, left, result, ::erf);
446        break;
447    #endif
448       case ASINH:
449    #ifdef _WIN32
450        tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
451    #else
452        tensor_unary_operation(m_samplesize, left, result, ::asinh);
453    #endif  
454        break;
455       case ACOSH:
456    #ifdef _WIN32
457        tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
458    #else
459        tensor_unary_operation(m_samplesize, left, result, ::acosh);
460    #endif  
461        break;
462       case ATANH:
463    #ifdef _WIN32
464        tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
465    #else
466        tensor_unary_operation(m_samplesize, left, result, ::atanh);
467    #endif  
468        break;
469        case LOG10:
470        tensor_unary_operation(m_samplesize, left, result, ::log10);
471        break;
472        case LOG:
473        tensor_unary_operation(m_samplesize, left, result, ::log);
474        break;
475        case SIGN:
476        tensor_unary_operation(m_samplesize, left, result, escript::fsign);
477        break;
478        case ABS:
479        tensor_unary_operation(m_samplesize, left, result, ::fabs);
480        break;
481        case NEG:
482        tensor_unary_operation(m_samplesize, left, result, negate<double>());
483        break;
484        case POS:
485        // it doesn't mean anything for delayed.
486        // it will just trigger a deep copy of the lazy object
487        throw DataException("Programmer error - POS not supported for lazy data.");
488        break;
489        case EXP:
490        tensor_unary_operation(m_samplesize, left, result, ::exp);
491        break;
492        case SQRT:
493        tensor_unary_operation(m_samplesize, left, result, ::sqrt);
494        break;
495        case RECIP:
496        tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
497        break;
498        case GZ:
499        tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
500        break;
501        case LZ:
502        tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
503        break;
504        case GEZ:
505        tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
506        break;
507        case LEZ:
508        tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
509        break;
510    
511        default:
512        throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
513      }
514      return &v;
515    }
516    
517    
518    
519    // const double*
520    // DataLazy::resolveUnary(ValueType& v,int sampleNo,  size_t offset) const
521    // {
522    //  // we assume that any collapsing has been done before we get here
523    //  // since we only have one argument we don't need to think about only
524    //  // processing single points.
525    //   if (m_readytype!='E')
526    //   {
527    //     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
528    //   }
529    //   const double* left=m_left->resolveSample(v,sampleNo,offset);
530    //   double* result=&(v[offset]);
531    //   switch (m_op)
532    //   {
533    //     case SIN:    
534    //  tensor_unary_operation(m_samplesize, left, result, ::sin);
535    //  break;
536    //     case COS:
537    //  tensor_unary_operation(m_samplesize, left, result, ::cos);
538    //  break;
539    //     case TAN:
540    //  tensor_unary_operation(m_samplesize, left, result, ::tan);
541    //  break;
542    //     case ASIN:
543    //  tensor_unary_operation(m_samplesize, left, result, ::asin);
544    //  break;
545    //     case ACOS:
546    //  tensor_unary_operation(m_samplesize, left, result, ::acos);
547    //  break;
548    //     case ATAN:
549    //  tensor_unary_operation(m_samplesize, left, result, ::atan);
550    //  break;
551    //     case SINH:
552    //  tensor_unary_operation(m_samplesize, left, result, ::sinh);
553    //  break;
554    //     case COSH:
555    //  tensor_unary_operation(m_samplesize, left, result, ::cosh);
556    //  break;
557    //     case TANH:
558    //  tensor_unary_operation(m_samplesize, left, result, ::tanh);
559    //  break;
560    //     case ERF:
561    // #ifdef _WIN32
562    //  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
563    // #else
564    //  tensor_unary_operation(m_samplesize, left, result, ::erf);
565    //  break;
566    // #endif
567    //    case ASINH:
568    // #ifdef _WIN32
569    //  tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
570    // #else
571    //  tensor_unary_operation(m_samplesize, left, result, ::asinh);
572    // #endif  
573    //  break;
574    //    case ACOSH:
575    // #ifdef _WIN32
576    //  tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
577    // #else
578    //  tensor_unary_operation(m_samplesize, left, result, ::acosh);
579    // #endif  
580    //  break;
581    //    case ATANH:
582    // #ifdef _WIN32
583    //  tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
584    // #else
585    //  tensor_unary_operation(m_samplesize, left, result, ::atanh);
586    // #endif  
587    //  break;
588    //     case LOG10:
589    //  tensor_unary_operation(m_samplesize, left, result, ::log10);
590    //  break;
591    //     case LOG:
592    //  tensor_unary_operation(m_samplesize, left, result, ::log);
593    //  break;
594    //     case SIGN:
595    //  tensor_unary_operation(m_samplesize, left, result, escript::fsign);
596    //  break;
597    //     case ABS:
598    //  tensor_unary_operation(m_samplesize, left, result, ::fabs);
599    //  break;
600    //     case NEG:
601    //  tensor_unary_operation(m_samplesize, left, result, negate<double>());
602    //  break;
603    //     case POS:
604    //  // it doesn't mean anything for delayed.
605    //  // it will just trigger a deep copy of the lazy object
606    //  throw DataException("Programmer error - POS not supported for lazy data.");
607    //  break;
608    //     case EXP:
609    //  tensor_unary_operation(m_samplesize, left, result, ::exp);
610    //  break;
611    //     case SQRT:
612    //  tensor_unary_operation(m_samplesize, left, result, ::sqrt);
613    //  break;
614    //     case RECIP:
615    //  tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
616    //  break;
617    //     case GZ:
618    //  tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
619    //  break;
620    //     case LZ:
621    //  tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
622    //  break;
623    //     case GEZ:
624    //  tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
625    //  break;
626    //     case LEZ:
627    //  tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
628    //  break;
629    //
630    //     default:
631    //  throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
632    //   }
633    //   return result;
634    // }
635    
636    #define PROC_OP(X) \
637        for (int i=0;i<steps;++i,resultp+=getNoValues()) \
638        { \
639    cout << "Step#" << i << " chunk=" << chunksize << endl; \
640    cout << left[0] << left[1] << left[2] << endl; \
641    cout << right[0] << right[1] << right[2] << endl; \
642           tensor_binary_operation(chunksize, left, right, resultp, X); \
643           left+=leftStep; \
644           right+=rightStep; \
645    cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \
646        }
647    
648    DataTypes::ValueType*
649    DataLazy::resolveBinary(ValueType& v,  size_t offset ,int sampleNo, size_t& roffset) const
650    {
651        // again we assume that all collapsing has already been done
652        // so we have at least one expanded child.
653        // however, we could still have one of the children being not expanded.
654    
655    cout << "Resolve binary: " << toString() << endl;
656    
657      size_t lroffset=0, rroffset=0;
658    
659      bool leftExp=(m_left->m_readytype=='E');
660      bool rightExp=(m_right->m_readytype=='E');
661      bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step
662      int steps=(bigloops?1:getNumDPPSample());
663      size_t chunksize=(bigloops? m_samplesize : getNoValues());
664      int leftStep=((leftExp && !rightExp)? getNoValues() : 0);
665      int rightStep=((rightExp && !leftExp)? getNoValues() : 0);
666    
667      const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
668      const ValueType* right=m_right->resolveSample(v,offset,sampleNo,rroffset);    
669        // now we need to know which args are expanded
670    cout << "left=" << left << " right=" << right << endl;
671    cout << "(Length) l=" << left->size() << " r=" << right->size() << " res=" << v.size() << endl;
672      double* resultp=&(v[offset]);
673      switch(m_op)
674      {
675        case ADD:
676        for (int i=0;i<steps;++i,resultp+=getNoValues())
677        {
678    cerr << "Step#" << i << " chunk=" << chunksize << endl;
679    cerr << left << "[" << lroffset << "] " << right << "[" << rroffset << "]" << endl;
680           tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, plus<double>());
681           lroffset+=leftStep;
682           rroffset+=rightStep;
683    cerr << "left=" << lroffset << " right=" << rroffset << endl;
684        }
685        break;
686    // need to fill in the rest
687        default:
688        throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
689      }
690      roffset=offset;
691      return &v;
692    }
693    
694    
695    
696    // #define PROC_OP(X) \
697    //  for (int i=0;i<steps;++i,resultp+=getNoValues()) \
698    //  { \
699    // cout << "Step#" << i << " chunk=" << chunksize << endl; \
700    // cout << left[0] << left[1] << left[2] << endl; \
701    // cout << right[0] << right[1] << right[2] << endl; \
702    //     tensor_binary_operation(chunksize, left, right, resultp, X); \
703    //     left+=leftStep; \
704    //     right+=rightStep; \
705    // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \
706    //  }
707    //
708    // const double*
709    // DataLazy::resolveBinary(ValueType& v,int sampleNo,  size_t offset) const
710    // {
711    //  // again we assume that all collapsing has already been done
712    //  // so we have at least one expanded child.
713    //  // however, we could still have one of the children being not expanded.
714    //
715    // cout << "Resolve binary: " << toString() << endl;
716    //
717    //   const double* left=m_left->resolveSample(v,sampleNo,offset);
718    // // cout << "Done Left " << /*left[0] << left[1] << left[2] << */endl;
719    //   const double* right=m_right->resolveSample(v,sampleNo,offset);
720    // // cout << "Done Right"  << /*right[0] << right[1] << right[2] <<*/ endl;
721    //      // now we need to know which args are expanded
722    //   bool leftExp=(m_left->m_readytype=='E');
723    //   bool rightExp=(m_right->m_readytype=='E');
724    //   bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));  // is processing in single step
725    //   int steps=(bigloops?1:getNumSamples());
726    //   size_t chunksize=(bigloops? m_samplesize : getNoValues());
727    //   int leftStep=((leftExp && !rightExp)? getNoValues() : 0);
728    //   int rightStep=((rightExp && !leftExp)? getNoValues() : 0);
729    // cout << "left=" << left << " right=" << right << endl;
730    //   double* result=&(v[offset]);
731    //   double* resultp=result;
732    //   switch(m_op)
733    //   {
734    //     case ADD:
735    //  for (int i=0;i<steps;++i,resultp+=getNoValues())
736    //  {
737    // cout << "Step#" << i << " chunk=" << chunksize << endl; \
738    // // cout << left[0] << left[1] << left[2] << endl;
739    // // cout << right[0] << right[1] << right[2] << endl;
740    //     tensor_binary_operation(chunksize, left, right, resultp, plus<double>());
741    // cout << "left=" << left << " right=" << right << " resp=" << resultp << endl;
742    //     left+=leftStep;
743    //     right+=rightStep;
744    // cout << "left=" << left << " right=" << right << endl;
745    // // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl;
746    //  }
747    //  break;
748    // // need to fill in the rest
749    //     default:
750    //  throw DataException("Programmer error - resolveBinay can not resolve operator "+opToString(m_op)+".");
751    //   }
752    // // cout << "About to return "  << result[0] << result[1] << result[2] << endl;;
753    //   return result;
754    // }
755    
756    // // the vector and the offset are a place where the method could write its data if it wishes
757    // // it is not obligated to do so. For example, if it has its own storage already, it can use that.
758    // // Hence the return value to indicate where the data is actually stored.
759    // // Regardless, the storage should be assumed to be used, even if it isn't.
760    // const double*
761    // DataLazy::resolveSample(ValueType& v,int sampleNo,  size_t offset )
762    // {
763    // cout << "Resolve sample " << toString() << endl;
764    //  // collapse so we have a 'E' node or an IDENTITY for some other type
765    //   if (m_readytype!='E' && m_op!=IDENTITY)
766    //   {
767    //  collapse();
768    //   }
769    //   if (m_op==IDENTITY)    
770    //   {
771    //     const ValueType& vec=m_id->getVector();
772    //     if (m_readytype=='C')
773    //     {
774    //  return &(vec[0]);
775    //     }
776    //     return &(vec[m_id->getPointOffset(sampleNo, 0)]);
777    //   }
778    //   if (m_readytype!='E')
779    //   {
780    //     throw DataException("Programmer Error - Collapse did not produce an expanded node.");
781    //   }
782    //   switch (getOpgroup(m_op))
783    //   {
784    //   case G_UNARY: return resolveUnary(v,sampleNo,offset);
785    //   case G_BINARY: return resolveBinary(v,sampleNo,offset);
786    //   default:
787    //     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
788    //   }
789    // }
790    
791    
792    
793    // the vector and the offset are a place where the method could write its data if it wishes
794    // it is not obligated to do so. For example, if it has its own storage already, it can use that.
795    // Hence the return value to indicate where the data is actually stored.
796    // Regardless, the storage should be assumed to be used, even if it isn't.
797    
798    // the roffset is the offset within the returned vector where the data begins
799    const DataTypes::ValueType*
800    DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
801    {
802    cout << "Resolve sample " << toString() << endl;
803        // collapse so we have a 'E' node or an IDENTITY for some other type
804      if (m_readytype!='E' && m_op!=IDENTITY)
805      {
806        collapse();
807      }
808      if (m_op==IDENTITY)  
809      {
810        const ValueType& vec=m_id->getVector();
811        if (m_readytype=='C')
812        {
813        roffset=0;
814        return &(vec);
815      }      }
816        roffset=m_id->getPointOffset(sampleNo, 0);
817        return &(vec);
818      }
819      if (m_readytype!='E')
820      {
821        throw DataException("Programmer Error - Collapse did not produce an expanded node.");
822      }
823      switch (getOpgroup(m_op))
824      {
825      case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);
826      case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
827      default:
828        throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
829    }    }
830  }  }
831    
832    
833    
834    
835    // This version uses double* trying again with vectors
836    // DataReady_ptr
837    // DataLazy::resolve()
838    // {
839    //
840    // cout << "Sample size=" << m_samplesize << endl;
841    // cout << "Buffers=" << m_buffsRequired << endl;
842    //
843    //   if (m_readytype!='E')
844    //   {
845    //     collapse();
846    //   }
847    //   if (m_op==IDENTITY)
848    //   {
849    //     return m_id;
850    //   }
851    //      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
852    //   size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);
853    //   int numthreads=1;
854    // #ifdef _OPENMP
855    //   numthreads=getNumberOfThreads();
856    //   int threadnum=0;
857    // #endif
858    //   ValueType v(numthreads*threadbuffersize);  
859    // cout << "Buffer created with size=" << v.size() << endl;
860    //   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
861    //   ValueType& resvec=result->getVector();
862    //   DataReady_ptr resptr=DataReady_ptr(result);
863    //   int sample;
864    //   int resoffset;
865    //   int totalsamples=getNumSamples();
866    //   const double* res=0;
867    //   #pragma omp parallel for private(sample,resoffset,threadnum,res) schedule(static)
868    //   for (sample=0;sample<totalsamples;++sample)
869    //   {
870    // cout << "################################# " << sample << endl;
871    // #ifdef _OPENMP
872    //     res=resolveSample(v,sample,threadbuffersize*omp_get_thread_num());
873    // #else
874    //     res=resolveSample(v,sample,0);   // this would normally be v, but not if its a single IDENTITY op.
875    // #endif
876    // cerr << "-------------------------------- " << endl;
877    //     resoffset=result->getPointOffset(sample,0);
878    // cerr << "offset=" << resoffset << endl;
879    //     for (unsigned int i=0;i<m_samplesize;++i,++resoffset)    // copy values into the output vector
880    //     {
881    //  resvec[resoffset]=res[i];
882    //     }
883    // cerr << "*********************************" << endl;
884    //   }
885    //   return resptr;
886    // }
887    
888    
889  DataReady_ptr  DataReady_ptr
890  DataLazy::resolve()  DataLazy::resolve()
891  {  {
   // This is broken!     We need to have a buffer per thread!  
   // so the allocation of v needs to move inside the loop somehow  
892    
893  cout << "Sample size=" << m_samplesize << endl;  cout << "Sample size=" << m_samplesize << endl;
894  cout << "Buffers=" << m_buffsRequired << endl;  cout << "Buffers=" << m_buffsRequired << endl;
895    
896    ValueType v(m_samplesize*max(1,m_buffsRequired));    if (m_readytype!='E')
897      {
898        collapse();
899      }
900      if (m_op==IDENTITY)
901      {
902        return m_id;
903      }
904        // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
905      size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);
906      int numthreads=1;
907    #ifdef _OPENMP
908      numthreads=getNumberOfThreads();
909      int threadnum=0;
910    #endif
911      ValueType v(numthreads*threadbuffersize);
912  cout << "Buffer created with size=" << v.size() << endl;  cout << "Buffer created with size=" << v.size() << endl;
913    ValueType dummy(getNoValues());    DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),dummy);  
914    ValueType& resvec=result->getVector();    ValueType& resvec=result->getVector();
915    DataReady_ptr resptr=DataReady_ptr(result);    DataReady_ptr resptr=DataReady_ptr(result);
916    int sample;    int sample;
917    #pragma omp parallel for private(sample) schedule(static)    size_t outoffset;     // offset in the output data
918    for (sample=0;sample<getNumSamples();++sample)    int totalsamples=getNumSamples();
919      const ValueType* res=0;
920      size_t resoffset=0;
921      #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)
922      for (sample=0;sample<totalsamples;++sample)
923    {    {
924      resolveSample(v,sample,0);  cout << "################################# " << sample << endl;
925      for (int i=0;i<m_samplesize;++i)    // copy values into the output vector  #ifdef _OPENMP
926        res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
927    #else
928        res=resolveSample(v,0,sample,resoffset);   // this would normally be v, but not if its a single IDENTITY op.
929    #endif
930    cerr << "-------------------------------- " << endl;
931        outoffset=result->getPointOffset(sample,0);
932    cerr << "offset=" << outoffset << endl;
933        for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector
934      {      {
935      resvec[i]=v[i];      resvec[outoffset]=(*res)[resoffset];
936      }      }
937    cerr << "*********************************" << endl;
938    }    }
939    return resptr;    return resptr;
940  }  }
# Line 280  cout << "Buffer created with size=" << v Line 942  cout << "Buffer created with size=" << v
942  std::string  std::string
943  DataLazy::toString() const  DataLazy::toString() const
944  {  {
945    return "Lazy evaluation object. No details available.";    ostringstream oss;
946      oss << "Lazy Data:";
947      intoString(oss);
948      return oss.str();
949    }
950    
951    void
952    DataLazy::intoString(ostringstream& oss) const
953    {
954      switch (getOpgroup(m_op))
955      {
956      case G_IDENTITY:
957        if (m_id->isExpanded())
958        {
959           oss << "E";
960        }
961        else if (m_id->isTagged())
962        {
963          oss << "T";
964        }
965        else if (m_id->isConstant())
966        {
967          oss << "C";
968        }
969        else
970        {
971          oss << "?";
972        }
973        oss << '@' << m_id.get();
974        break;
975      case G_BINARY:
976        oss << '(';
977        m_left->intoString(oss);
978        oss << ' ' << opToString(m_op) << ' ';
979        m_right->intoString(oss);
980        oss << ')';
981        break;
982      case G_UNARY:
983        oss << opToString(m_op) << '(';
984        m_left->intoString(oss);
985        oss << ')';
986        break;
987      default:
988        oss << "UNKNOWN";
989      }
990  }  }
991    
992  // Note that in this case, deepCopy does not make copies of the leaves.  // Note that in this case, deepCopy does not make copies of the leaves.
# Line 316  DataLazy::getPointOffset(int sampleNo, Line 1022  DataLazy::getPointOffset(int sampleNo,
1022    throw DataException("getPointOffset - not implemented for Lazy objects - yet.");    throw DataException("getPointOffset - not implemented for Lazy objects - yet.");
1023  }  }
1024    
 // // The startOffset is where to write results in the output vector v  
 // void  
 // DataLazy::processSample(ValueType& v, int sampleNo, size_t startOffset)  
 // {  
 //     m_left.processSample(v,sampleNo,startOffset);  
 //     m_right.processSample(v,sampleNo,startOffset+getSampleSize());  
 //     int i;  
 //     #pragma omp parallel for private(i) schedule(static)  
 //     for (i=0;i<getSampleSize();++i)  
 //     {  
 //      performOp(v,startOffset+i*m_pointsize,ES_optype,m_samplesize);  
 //     }  
 // }  
   
   
1025  }   // end namespace  }   // end namespace

Legend: lines removed from v.1879 · changed lines · lines added in v.1898

  ViewVC Help
Powered by ViewVC 1.1.26