/[escript]/trunk/escript/src/DataLazy.cpp
ViewVC logotype

Annotation of /trunk/escript/src/DataLazy.cpp

Parent Directory | Revision Log


Revision 1898 - (hide annotations)
Mon Oct 20 01:20:18 2008 UTC (10 years, 8 months ago) by jfenwick
Original Path: branches/schroedinger/escript/src/DataLazy.cpp
File size: 29022 byte(s)
Branch commit.
Now can add Tagged and Constant as well as Expanded.
Other operations to follow.


1 jfenwick 1865
2     /*******************************************************
3     *
4     * Copyright (c) 2003-2008 by University of Queensland
5     * Earth Systems Science Computational Center (ESSCC)
6     * http://www.uq.edu.au/esscc
7     *
8     * Primary Business: Queensland, Australia
9     * Licensed under the Open Software License version 3.0
10     * http://www.opensource.org/licenses/osl-3.0.php
11     *
12     *******************************************************/
13    
14    
15     #include "DataLazy.h"
16     #ifdef USE_NETCDF
17     #include <netcdfcpp.h>
18     #endif
19     #ifdef PASO_MPI
20     #include <mpi.h>
21     #endif
22 jfenwick 1885 #ifdef _OPENMP
23     #include <omp.h>
24     #endif
25 jfenwick 1865 #include "FunctionSpace.h"
26     #include "DataTypes.h"
27 jfenwick 1868 #include "Data.h"
28 jfenwick 1886 #include "UnaryFuncs.h" // for escript::fsign
29 jfenwick 1865
30     using namespace std;
31 jfenwick 1868 using namespace boost;
32 jfenwick 1865
33     namespace escript
34     {
35    
36     const std::string&
37     opToString(ES_optype op);
38    
39     namespace
40     {
41    
42 jfenwick 1886
43    
// Classification of operators into broad groups so dispatch code
// (resultLength, calcBuffs, resolveSample, ...) can treat whole families
// of operators uniformly instead of switching on each individual op.
enum ES_opgroup
{
   G_UNKNOWN,
   G_IDENTITY,	// wrapper around ready data; no computation required
   G_BINARY,	// (left op right)
   G_UNARY	// op(left)
};

// Printable name for each operator, indexed by ES_optype.
// The entries here must stay in the same order as ES_optype (DataLazy.h)
// and as the opgroups table below.
std::string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","sin","cos","tan",
			"asin","acos","atan","sinh","cosh","tanh","erf",
			"asinh","acosh","atanh",
			"log10","log","sign","abs","neg","pos","exp","sqrt",
			"1/","where>0","where<0","where>=0","where<=0"};
// Derived from the table itself so the count can never drift out of sync
// with the strings (was previously the hand-maintained literal 32).
int ES_opcount=sizeof(ES_opstrings)/sizeof(ES_opstrings[0]);
// Group of each operator, indexed by ES_optype; running totals in comments.
ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY,G_UNARY,G_UNARY,G_UNARY, //9
			G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,	// 16
			G_UNARY,G_UNARY,G_UNARY,	// 19
			G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,	// 27
			G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY};
// Map an operator to its group (identity/unary/binary).
// Assumes op is a valid ES_optype index into opgroups; no range check here.
inline
ES_opgroup
getOpgroup(ES_optype op)
{
   return opgroups[op];
}
72    
73 jfenwick 1865 // return the FunctionSpace of the result of "left op right"
74     FunctionSpace
75     resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
76     {
77     // perhaps this should call interpolate and throw or something?
78     // maybe we need an interpolate node -
79     // that way, if interpolate is required in any other op we can just throw a
80     // programming error exception.
81 jfenwick 1879
82    
83     if (left->getFunctionSpace()!=right->getFunctionSpace())
84     {
85     throw DataException("FunctionSpaces not equal - interpolation not supported on lazy data.");
86     }
87 jfenwick 1865 return left->getFunctionSpace();
88     }
89    
90     // return the shape of the result of "left op right"
91     DataTypes::ShapeType
92     resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
93     {
94 jfenwick 1879 if (left->getShape()!=right->getShape())
95     {
96     throw DataException("Shapes not the same - shapes must match for lazy data.");
97     }
98     return left->getShape();
99 jfenwick 1865 }
100    
101     size_t
102     resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
103     {
104 jfenwick 1886 switch (getOpgroup(op))
105 jfenwick 1865 {
106 jfenwick 1886 case G_BINARY: return left->getLength();
107     case G_UNARY: return left->getLength();
108 jfenwick 1865 default:
109     throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");
110     }
111     }
112    
// Number of intermediate sample buffers required to evaluate this node.
// For a binary op the left result must be held while the right subtree
// evaluates, hence right's requirement is incremented BEFORE the max:
// max(L, R+1), not max(L,R)+1 - presumably because the buffer holding
// the left result can be reused for the output (TODO confirm).
int
calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
{
   switch(getOpgroup(op))
   {
   case G_IDENTITY: return 1;
   case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);
   case G_UNARY: return max(left->getBuffsRequired(),1);
   default:
	throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
   }
}
125    
126 jfenwick 1865 } // end anonymous namespace
127    
128    
129     const std::string&
130     opToString(ES_optype op)
131     {
132     if (op<0 || op>=ES_opcount)
133     {
134     op=UNKNOWNOP;
135     }
136     return ES_opstrings[op];
137     }
138    
139    
// Wrap a ready (non-lazy) DataAbstract as an IDENTITY leaf node.
// p must not itself be lazy: evaluation assumes the child of an IDENTITY
// node supports getVector().
// Throws DataException if p is lazy or is an unrecognised DataReady kind.
DataLazy::DataLazy(DataAbstract_ptr p)
	: parent(p->getFunctionSpace(),p->getShape()),
	m_op(IDENTITY)
{
   if (p->isLazy())
   {
	// TODO: fix this. We could make the new node a copy of p?
	// I don't want identity of Lazy.
	// Question: Why would that be so bad?
	// Answer: We assume that the child of ID is something we can call getVector on
	throw DataException("Programmer error - attempt to create identity from a DataLazy.");
   }
   else
   {
	m_id=dynamic_pointer_cast<DataReady>(p);
	// record the concrete storage class: Constant / Expanded / Tagged
	if(p->isConstant()) {m_readytype='C';}
	else if(p->isExpanded()) {m_readytype='E';}
	else if (p->isTagged()) {m_readytype='T';}
	else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}
   }
   m_length=p->getLength();
   m_buffsRequired=1;	// an identity leaf only ever needs one buffer
   m_samplesize=getNumDPPSample()*getNoValues();
   cout << "(1)Lazy created with " << m_samplesize << endl;	// debug trace
}
165    
// Build a lazy node for the unary operation "op(left)".
// A non-lazy operand is first wrapped in an IDENTITY leaf so that
// evaluation only ever deals with DataLazy children.
// Throws DataException if op is not a unary operator.
DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
	: parent(left->getFunctionSpace(),left->getShape()),
	m_op(op)
{
   if (getOpgroup(op)!=G_UNARY)
   {
	throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
   }
   DataLazy_ptr lleft;
   if (!left->isLazy())
   {
	lleft=DataLazy_ptr(new DataLazy(left));	// wrap ready data in an IDENTITY leaf
   }
   else
   {
	lleft=dynamic_pointer_cast<DataLazy>(left);
   }
   m_readytype=lleft->m_readytype;	// a unary op preserves the operand's storage class
   m_length=left->getLength();
   m_left=lleft;
   m_buffsRequired=1;
   m_samplesize=getNumDPPSample()*getNoValues();
}
189    
190    
191 jfenwick 1889 // DataLazy::DataLazy(DataLazy_ptr left, DataLazy_ptr right, ES_optype op)
192     // : parent(resultFS(left,right,op), resultShape(left,right,op)),
193     // m_left(left),
194     // m_right(right),
195     // m_op(op)
196     // {
197     // if (getOpgroup(op)!=G_BINARY)
198     // {
199     // throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
200     // }
201     // m_length=resultLength(m_left,m_right,m_op);
202     // m_samplesize=getNumDPPSample()*getNoValues();
203     // m_buffsRequired=calcBuffs(m_left, m_right, m_op);
204     // cout << "(2)Lazy created with " << m_samplesize << endl;
205     // }
206 jfenwick 1865
// Build a lazy node for the binary operation "left op right".
// Operands which are not already lazy are wrapped in IDENTITY leaves.
// resultFS/resultShape enforce matching FunctionSpaces and shapes.
// Throws DataException if op is not a binary operator.
DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
	: parent(resultFS(left,right,op), resultShape(left,right,op)),
	m_op(op)
{
   if (getOpgroup(op)!=G_BINARY)
   {
	throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
   }
   if (left->isLazy())
   {
	m_left=dynamic_pointer_cast<DataLazy>(left);
   }
   else
   {
	m_left=DataLazy_ptr(new DataLazy(left));
   }
   if (right->isLazy())
   {
	m_right=dynamic_pointer_cast<DataLazy>(right);
   }
   else
   {
	m_right=DataLazy_ptr(new DataLazy(right));
   }
   // the result takes the "widest" of the children's storage classes:
   // Expanded dominates Tagged, which dominates Constant
   char lt=m_left->m_readytype;
   char rt=m_right->m_readytype;
   if (lt=='E' || rt=='E')
   {
	m_readytype='E';
   }
   else if (lt=='T' || rt=='T')
   {
	m_readytype='T';
   }
   else
   {
	m_readytype='C';
   }
   m_length=resultLength(m_left,m_right,m_op);
   m_samplesize=getNumDPPSample()*getNoValues();
   m_buffsRequired=calcBuffs(m_left, m_right,m_op);
   cout << "(3)Lazy created with " << m_samplesize << endl;	// debug trace
}
250    
251    
// Nothing to release explicitly: children (m_left/m_right) and the
// identity value (m_id) are held through shared pointers.
DataLazy::~DataLazy()
{
}
255    
256 jfenwick 1879
// Number of sample buffers needed to evaluate this expression tree,
// as computed by the constructors (see calcBuffs).
int
DataLazy::getBuffsRequired() const
{
	return m_buffsRequired;
}
262    
263 jfenwick 1884
// Evaluate this (non-expanded) expression eagerly and return the result
// as ready data. Used by collapse() to fold a lazy tree over Constant or
// Tagged operands back into a single DataReady.
// Throws for Expanded data (collapsing it would defeat lazy evaluation)
// and for operators with no eager equivalent (e.g. POS).
DataReady_ptr
DataLazy::collapseToReady()
{
  if (m_readytype=='E')
  {	// this is more an efficiency concern than anything else
    throw DataException("Programmer Error - do not use collapse on Expanded data.");
  }
  if (m_op==IDENTITY)
  {
    return m_id;	// leaf node - already ready
  }
  // recursively collapse the operand(s), then delegate the arithmetic to
  // the eager Data interface
  DataReady_ptr pleft=m_left->collapseToReady();
  Data left(pleft);
  Data right;
  if (getOpgroup(m_op)==G_BINARY)
  {
    right=Data(m_right->collapseToReady());
  }
  Data result;
  switch(m_op)
  {
    case ADD:
	result=left+right;
	break;
    case SUB:
	result=left-right;
	break;
    case MUL:
	result=left*right;
	break;
    case DIV:
	result=left/right;
	break;
    case SIN:
	result=left.sin();
	break;
    case COS:
	result=left.cos();
	break;
    case TAN:
	result=left.tan();
	break;
    case ASIN:
	result=left.asin();
	break;
    case ACOS:
	result=left.acos();
	break;
    case ATAN:
	result=left.atan();
	break;
    case SINH:
	result=left.sinh();
	break;
    case COSH:
	result=left.cosh();
	break;
    case TANH:
	result=left.tanh();
	break;
    case ERF:
	result=left.erf();
	break;
    case ASINH:
	result=left.asinh();
	break;
    case ACOSH:
	result=left.acosh();
	break;
    case ATANH:
	result=left.atanh();
	break;
    case LOG10:
	result=left.log10();
	break;
    case LOG:
	result=left.log();
	break;
    case SIGN:
	result=left.sign();
	break;
    case ABS:
	result=left.abs();
	break;
    case NEG:
	result=left.neg();
	break;
    case POS:
	// it doesn't mean anything for delayed.
	// it will just trigger a deep copy of the lazy object
	throw DataException("Programmer error - POS not supported for lazy data.");
	break;
    case EXP:
	result=left.exp();
	break;
    case SQRT:
	result=left.sqrt();
	break;
    case RECIP:
	result=left.oneOver();
	break;
    case GZ:
	result=left.wherePositive();
	break;
    case LZ:
	result=left.whereNegative();
	break;
    case GEZ:
	result=left.whereNonNegative();
	break;
    case LEZ:
	result=left.whereNonPositive();
	break;
    default:
	throw DataException("Programmer error - do not know how to resolve operator "+opToString(m_op)+".");
  }
  return result.borrowReadyPtr();
}
382    
// Evaluate this node eagerly and replace the expression with an IDENTITY
// node holding the result. Intended for 'C' and 'T' data only.
// Note the ordering of the guards: an IDENTITY node of ANY storage class
// (including 'E') is already collapsed and must return before the
// expanded-data check.
void
DataLazy::collapse()
{
  if (m_op==IDENTITY)
  {
    return;
  }
  if (m_readytype=='E')
  {	// this is more an efficiency concern than anything else
    throw DataException("Programmer Error - do not use collapse on Expanded data.");
  }
  m_id=collapseToReady();
  m_op=IDENTITY;	// from now on this node just hands out m_id
}
397    
// Compute one sample of a unary operation.
// v        - scratch/output buffer shared down the recursion
// offset   - position in v where this node writes its result
// sampleNo - which sample to compute
// roffset  - out: offset within the returned vector where the result begins
// Returns a pointer to the vector actually holding the result (&v here).
// Only valid on expanded data; collapsing must have happened beforehand.
DataTypes::ValueType*
DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
	// we assume that any collapsing has been done before we get here
	// since we only have one argument we don't need to think about only
	// processing single points.
  if (m_readytype!='E')
  {
    throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
  }
  // resolve the child first; roffset then tells us where its data starts
  const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);
  const double* left=&((*vleft)[roffset]);
  double* result=&(v[offset]);
  roffset=offset;	// our own result is written into v at offset
  switch (m_op)
  {
    case SIN:
	tensor_unary_operation(m_samplesize, left, result, ::sin);
	break;
    case COS:
	tensor_unary_operation(m_samplesize, left, result, ::cos);
	break;
    case TAN:
	tensor_unary_operation(m_samplesize, left, result, ::tan);
	break;
    case ASIN:
	tensor_unary_operation(m_samplesize, left, result, ::asin);
	break;
    case ACOS:
	tensor_unary_operation(m_samplesize, left, result, ::acos);
	break;
    case ATAN:
	tensor_unary_operation(m_samplesize, left, result, ::atan);
	break;
    case SINH:
	tensor_unary_operation(m_samplesize, left, result, ::sinh);
	break;
    case COSH:
	tensor_unary_operation(m_samplesize, left, result, ::cosh);
	break;
    case TANH:
	tensor_unary_operation(m_samplesize, left, result, ::tanh);
	break;
    case ERF:
#ifdef _WIN32
	// no erf in the MSVC math library
	throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
	tensor_unary_operation(m_samplesize, left, result, ::erf);
	break;
#endif
    case ASINH:
#ifdef _WIN32
	// MSVC lacks the C99 inverse hyperbolics; use our substitutes
	tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
#else
	tensor_unary_operation(m_samplesize, left, result, ::asinh);
#endif
	break;
    case ACOSH:
#ifdef _WIN32
	tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
#else
	tensor_unary_operation(m_samplesize, left, result, ::acosh);
#endif
	break;
    case ATANH:
#ifdef _WIN32
	tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
#else
	tensor_unary_operation(m_samplesize, left, result, ::atanh);
#endif
	break;
    case LOG10:
	tensor_unary_operation(m_samplesize, left, result, ::log10);
	break;
    case LOG:
	tensor_unary_operation(m_samplesize, left, result, ::log);
	break;
    case SIGN:
	tensor_unary_operation(m_samplesize, left, result, escript::fsign);
	break;
    case ABS:
	tensor_unary_operation(m_samplesize, left, result, ::fabs);
	break;
    case NEG:
	tensor_unary_operation(m_samplesize, left, result, negate<double>());
	break;
    case POS:
	// it doesn't mean anything for delayed.
	// it will just trigger a deep copy of the lazy object
	throw DataException("Programmer error - POS not supported for lazy data.");
	break;
    case EXP:
	tensor_unary_operation(m_samplesize, left, result, ::exp);
	break;
    case SQRT:
	tensor_unary_operation(m_samplesize, left, result, ::sqrt);
	break;
    case RECIP:
	tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
	break;
    case GZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
	break;
    case LZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
	break;
    case GEZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
	break;
    case LEZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
	break;

    default:
	throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
  }
  return &v;
}
516    
517    
518 jfenwick 1898
519     // const double*
520     // DataLazy::resolveUnary(ValueType& v,int sampleNo, size_t offset) const
521     // {
522     // // we assume that any collapsing has been done before we get here
523     // // since we only have one argument we don't need to think about only
524     // // processing single points.
525     // if (m_readytype!='E')
526     // {
527     // throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
528     // }
529     // const double* left=m_left->resolveSample(v,sampleNo,offset);
530     // double* result=&(v[offset]);
531     // switch (m_op)
532     // {
533     // case SIN:
534     // tensor_unary_operation(m_samplesize, left, result, ::sin);
535     // break;
536     // case COS:
537     // tensor_unary_operation(m_samplesize, left, result, ::cos);
538     // break;
539     // case TAN:
540     // tensor_unary_operation(m_samplesize, left, result, ::tan);
541     // break;
542     // case ASIN:
543     // tensor_unary_operation(m_samplesize, left, result, ::asin);
544     // break;
545     // case ACOS:
546     // tensor_unary_operation(m_samplesize, left, result, ::acos);
547     // break;
548     // case ATAN:
549     // tensor_unary_operation(m_samplesize, left, result, ::atan);
550     // break;
551     // case SINH:
552     // tensor_unary_operation(m_samplesize, left, result, ::sinh);
553     // break;
554     // case COSH:
555     // tensor_unary_operation(m_samplesize, left, result, ::cosh);
556     // break;
557     // case TANH:
558     // tensor_unary_operation(m_samplesize, left, result, ::tanh);
559     // break;
560     // case ERF:
561     // #ifdef _WIN32
562     // throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
563     // #else
564     // tensor_unary_operation(m_samplesize, left, result, ::erf);
565     // break;
566     // #endif
567     // case ASINH:
568     // #ifdef _WIN32
569     // tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
570     // #else
571     // tensor_unary_operation(m_samplesize, left, result, ::asinh);
572     // #endif
573     // break;
574     // case ACOSH:
575     // #ifdef _WIN32
576     // tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
577     // #else
578     // tensor_unary_operation(m_samplesize, left, result, ::acosh);
579     // #endif
580     // break;
581     // case ATANH:
582     // #ifdef _WIN32
583     // tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
584     // #else
585     // tensor_unary_operation(m_samplesize, left, result, ::atanh);
586     // #endif
587     // break;
588     // case LOG10:
589     // tensor_unary_operation(m_samplesize, left, result, ::log10);
590     // break;
591     // case LOG:
592     // tensor_unary_operation(m_samplesize, left, result, ::log);
593     // break;
594     // case SIGN:
595     // tensor_unary_operation(m_samplesize, left, result, escript::fsign);
596     // break;
597     // case ABS:
598     // tensor_unary_operation(m_samplesize, left, result, ::fabs);
599     // break;
600     // case NEG:
601     // tensor_unary_operation(m_samplesize, left, result, negate<double>());
602     // break;
603     // case POS:
604     // // it doesn't mean anything for delayed.
605     // // it will just trigger a deep copy of the lazy object
606     // throw DataException("Programmer error - POS not supported for lazy data.");
607     // break;
608     // case EXP:
609     // tensor_unary_operation(m_samplesize, left, result, ::exp);
610     // break;
611     // case SQRT:
612     // tensor_unary_operation(m_samplesize, left, result, ::sqrt);
613     // break;
614     // case RECIP:
615     // tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
616     // break;
617     // case GZ:
618     // tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
619     // break;
620     // case LZ:
621     // tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
622     // break;
623     // case GEZ:
624     // tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
625     // break;
626     // case LEZ:
627     // tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
628     // break;
629     //
630     // default:
631     // throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
632     // }
633     // return result;
634     // }
635    
636 jfenwick 1889 #define PROC_OP(X) \
637     for (int i=0;i<steps;++i,resultp+=getNoValues()) \
638     { \
639     cout << "Step#" << i << " chunk=" << chunksize << endl; \
640     cout << left[0] << left[1] << left[2] << endl; \
641     cout << right[0] << right[1] << right[2] << endl; \
642     tensor_binary_operation(chunksize, left, right, resultp, X); \
643     left+=leftStep; \
644     right+=rightStep; \
645     cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \
646     }
647    
// Compute one sample of a binary operation (currently only ADD is
// implemented). Parameters follow the resolveSample protocol: the result
// is written into v at offset, roffset reports where it starts, and the
// vector holding it (&v) is returned.
// If exactly one child is expanded, the non-expanded operand is applied
// point-by-point (steps=getNumDPPSample(), small chunks); otherwise the
// whole sample is processed in a single step.
DataTypes::ValueType*
DataLazy::resolveBinary(ValueType& v, size_t offset ,int sampleNo, size_t& roffset) const
{
	// again we assume that all collapsing has already been done
	// so we have at least one expanded child.
	// however, we could still have one of the children being not expanded.

cout << "Resolve binary: " << toString() << endl;	// debug trace

  size_t lroffset=0, rroffset=0;	// where each child's data begins in its vector

  bool leftExp=(m_left->m_readytype=='E');
  bool rightExp=(m_right->m_readytype=='E');
  bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));	// is processing in single step
  int steps=(bigloops?1:getNumDPPSample());
  size_t chunksize=(bigloops? m_samplesize : getNoValues());
  // only the expanded side advances when the other side is a single point
  int leftStep=((leftExp && !rightExp)? getNoValues() : 0);
  int rightStep=((rightExp && !leftExp)? getNoValues() : 0);

  // NOTE(review): both children are resolved with the SAME buffer offset;
  // if both subtrees actually write into v (both expanded, non-identity)
  // the right resolve may overwrite the left result. Confirm against the
  // buffer accounting in calcBuffs.
  const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
  const ValueType* right=m_right->resolveSample(v,offset,sampleNo,rroffset);
	// now we need to know which args are expanded
cout << "left=" << left << " right=" << right << endl;
cout << "(Length) l=" << left->size() << " r=" << right->size() << " res=" << v.size() << endl;
  double* resultp=&(v[offset]);
  switch(m_op)
  {
    case ADD:
	for (int i=0;i<steps;++i,resultp+=getNoValues())
	{
cerr << "Step#" << i << " chunk=" << chunksize << endl;
cerr << left << "[" << lroffset << "] " << right << "[" << rroffset << "]" << endl;
		tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, plus<double>());
		lroffset+=leftStep;
		rroffset+=rightStep;
cerr << "left=" << lroffset << " right=" << rroffset << endl;
	}
	break;
// need to fill in the rest
    default:
	throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
  }
  roffset=offset;	// result lives in v starting at offset
  return &v;
}
693    
694 jfenwick 1898
695    
696     // #define PROC_OP(X) \
697     // for (int i=0;i<steps;++i,resultp+=getNoValues()) \
698     // { \
699     // cout << "Step#" << i << " chunk=" << chunksize << endl; \
700     // cout << left[0] << left[1] << left[2] << endl; \
701     // cout << right[0] << right[1] << right[2] << endl; \
702     // tensor_binary_operation(chunksize, left, right, resultp, X); \
703     // left+=leftStep; \
704     // right+=rightStep; \
705     // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \
706     // }
707     //
708     // const double*
709     // DataLazy::resolveBinary(ValueType& v,int sampleNo, size_t offset) const
710     // {
711     // // again we assume that all collapsing has already been done
712     // // so we have at least one expanded child.
713     // // however, we could still have one of the children being not expanded.
714     //
715     // cout << "Resolve binary: " << toString() << endl;
716     //
717     // const double* left=m_left->resolveSample(v,sampleNo,offset);
718     // // cout << "Done Left " << /*left[0] << left[1] << left[2] << */endl;
719     // const double* right=m_right->resolveSample(v,sampleNo,offset);
720     // // cout << "Done Right" << /*right[0] << right[1] << right[2] <<*/ endl;
721     // // now we need to know which args are expanded
722     // bool leftExp=(m_left->m_readytype=='E');
723     // bool rightExp=(m_right->m_readytype=='E');
724     // bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp)); // is processing in single step
725     // int steps=(bigloops?1:getNumSamples());
726     // size_t chunksize=(bigloops? m_samplesize : getNoValues());
727     // int leftStep=((leftExp && !rightExp)? getNoValues() : 0);
728     // int rightStep=((rightExp && !leftExp)? getNoValues() : 0);
729     // cout << "left=" << left << " right=" << right << endl;
730     // double* result=&(v[offset]);
731     // double* resultp=result;
732     // switch(m_op)
733     // {
734     // case ADD:
735     // for (int i=0;i<steps;++i,resultp+=getNoValues())
736     // {
737     // cout << "Step#" << i << " chunk=" << chunksize << endl; \
738     // // cout << left[0] << left[1] << left[2] << endl;
739     // // cout << right[0] << right[1] << right[2] << endl;
740     // tensor_binary_operation(chunksize, left, right, resultp, plus<double>());
741     // cout << "left=" << left << " right=" << right << " resp=" << resultp << endl;
742     // left+=leftStep;
743     // right+=rightStep;
744     // cout << "left=" << left << " right=" << right << endl;
745     // // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl;
746     // }
747     // break;
748     // // need to fill in the rest
749     // default:
750     // throw DataException("Programmer error - resolveBinay can not resolve operator "+opToString(m_op)+".");
751     // }
752     // // cout << "About to return " << result[0] << result[1] << result[2] << endl;;
753     // return result;
754     // }
755    
756     // // the vector and the offset are a place where the method could write its data if it wishes
757     // // it is not obligated to do so. For example, if it has its own storage already, it can use that.
758     // // Hence the return value to indicate where the data is actually stored.
759     // // Regardless, the storage should be assumed to be used, even if it isn't.
760     // const double*
761     // DataLazy::resolveSample(ValueType& v,int sampleNo, size_t offset )
762     // {
763     // cout << "Resolve sample " << toString() << endl;
764     // // collapse so we have a 'E' node or an IDENTITY for some other type
765     // if (m_readytype!='E' && m_op!=IDENTITY)
766     // {
767     // collapse();
768     // }
769     // if (m_op==IDENTITY)
770     // {
771     // const ValueType& vec=m_id->getVector();
772     // if (m_readytype=='C')
773     // {
774     // return &(vec[0]);
775     // }
776     // return &(vec[m_id->getPointOffset(sampleNo, 0)]);
777     // }
778     // if (m_readytype!='E')
779     // {
780     // throw DataException("Programmer Error - Collapse did not produce an expanded node.");
781     // }
782     // switch (getOpgroup(m_op))
783     // {
784     // case G_UNARY: return resolveUnary(v,sampleNo,offset);
785     // case G_BINARY: return resolveBinary(v,sampleNo,offset);
786     // default:
787     // throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
788     // }
789     // }
790    
791    
792    
// the vector and the offset are a place where the method could write its data if it wishes
// it is not obligated to do so. For example, if it has its own storage already, it can use that.
// Hence the return value to indicate where the data is actually stored.
// Regardless, the storage should be assumed to be used, even if it isn't.

// the roffset is the offset within the returned vector where the data begins
const DataTypes::ValueType*
DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
{
cout << "Resolve sample " << toString() << endl;	// debug trace
	// collapse so we have a 'E' node or an IDENTITY for some other type
  if (m_readytype!='E' && m_op!=IDENTITY)
  {
	collapse();
  }
  if (m_op==IDENTITY)
  {
    // identity nodes hand out their own storage rather than writing into v
    const ValueType& vec=m_id->getVector();
    if (m_readytype=='C')
    {
	roffset=0;	// constant data: a single point shared by every sample
	return &(vec);
    }
    // tagged/expanded ready data: start of the requested sample
    roffset=m_id->getPointOffset(sampleNo, 0);
    return &(vec);
  }
  if (m_readytype!='E')
  {
    throw DataException("Programmer Error - Collapse did not produce an expanded node.");
  }
  // expanded non-identity node: delegate by operator group
  switch (getOpgroup(m_op))
  {
  case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);
  case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
  default:
    throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
  }
}
831    
832    
833 jfenwick 1888
834 jfenwick 1865
835 jfenwick 1898 // This version uses double* trying again with vectors
836     // DataReady_ptr
837     // DataLazy::resolve()
838     // {
839     //
840     // cout << "Sample size=" << m_samplesize << endl;
841     // cout << "Buffers=" << m_buffsRequired << endl;
842     //
843     // if (m_readytype!='E')
844     // {
845     // collapse();
846     // }
847     // if (m_op==IDENTITY)
848     // {
849     // return m_id;
850     // }
851     // // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
852     // size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);
853     // int numthreads=1;
854     // #ifdef _OPENMP
855     // numthreads=getNumberOfThreads();
856     // int threadnum=0;
857     // #endif
858     // ValueType v(numthreads*threadbuffersize);
859     // cout << "Buffer created with size=" << v.size() << endl;
860     // DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(), ValueType(getNoValues()));
861     // ValueType& resvec=result->getVector();
862     // DataReady_ptr resptr=DataReady_ptr(result);
863     // int sample;
864     // int resoffset;
865     // int totalsamples=getNumSamples();
866     // const double* res=0;
867     // #pragma omp parallel for private(sample,resoffset,threadnum,res) schedule(static)
868     // for (sample=0;sample<totalsamples;++sample)
869     // {
870     // cout << "################################# " << sample << endl;
871     // #ifdef _OPENMP
872     // res=resolveSample(v,sample,threadbuffersize*omp_get_thread_num());
873     // #else
874     // res=resolveSample(v,sample,0); // this would normally be v, but not if its a single IDENTITY op.
875     // #endif
876     // cerr << "-------------------------------- " << endl;
877     // resoffset=result->getPointOffset(sample,0);
878     // cerr << "offset=" << resoffset << endl;
879     // for (unsigned int i=0;i<m_samplesize;++i,++resoffset) // copy values into the output vector
880     // {
881     // resvec[resoffset]=res[i];
882     // }
883     // cerr << "*********************************" << endl;
884     // }
885     // return resptr;
886     // }
887    
888    
889 jfenwick 1879 DataReady_ptr
890     DataLazy::resolve()
891     {
892    
893     cout << "Sample size=" << m_samplesize << endl;
894     cout << "Buffers=" << m_buffsRequired << endl;
895    
896 jfenwick 1889 if (m_readytype!='E')
897     {
898     collapse();
899     }
900     if (m_op==IDENTITY)
901     {
902     return m_id;
903     }
904     // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
905 jfenwick 1885 size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);
906     int numthreads=1;
907     #ifdef _OPENMP
908 jfenwick 1889 numthreads=getNumberOfThreads();
909 jfenwick 1885 int threadnum=0;
910     #endif
911 jfenwick 1886 ValueType v(numthreads*threadbuffersize);
912 jfenwick 1879 cout << "Buffer created with size=" << v.size() << endl;
913 jfenwick 1898 DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(), ValueType(getNoValues()));
914 jfenwick 1879 ValueType& resvec=result->getVector();
915     DataReady_ptr resptr=DataReady_ptr(result);
916     int sample;
917 jfenwick 1898 size_t outoffset; // offset in the output data
918 jfenwick 1885 int totalsamples=getNumSamples();
919 jfenwick 1898 const ValueType* res=0;
920     size_t resoffset=0;
921     #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)
922 jfenwick 1885 for (sample=0;sample<totalsamples;++sample)
923 jfenwick 1879 {
924 jfenwick 1898 cout << "################################# " << sample << endl;
925 jfenwick 1885 #ifdef _OPENMP
926 jfenwick 1898 res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
927 jfenwick 1885 #else
928 jfenwick 1898 res=resolveSample(v,0,sample,resoffset); // this would normally be v, but not if its a single IDENTITY op.
929 jfenwick 1885 #endif
930 jfenwick 1898 cerr << "-------------------------------- " << endl;
931     outoffset=result->getPointOffset(sample,0);
932     cerr << "offset=" << outoffset << endl;
933     for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset) // copy values into the output vector
934 jfenwick 1879 {
935 jfenwick 1898 resvec[outoffset]=(*res)[resoffset];
936 jfenwick 1879 }
937 jfenwick 1898 cerr << "*********************************" << endl;
938 jfenwick 1879 }
939     return resptr;
940     }
941    
942 jfenwick 1865 std::string
943     DataLazy::toString() const
944     {
945 jfenwick 1886 ostringstream oss;
946     oss << "Lazy Data:";
947     intoString(oss);
948     return oss.str();
949 jfenwick 1865 }
950    
951 jfenwick 1886 void
952     DataLazy::intoString(ostringstream& oss) const
953     {
954     switch (getOpgroup(m_op))
955     {
956 jfenwick 1889 case G_IDENTITY:
957     if (m_id->isExpanded())
958     {
959     oss << "E";
960     }
961     else if (m_id->isTagged())
962     {
963     oss << "T";
964     }
965     else if (m_id->isConstant())
966     {
967     oss << "C";
968     }
969     else
970     {
971     oss << "?";
972     }
973 jfenwick 1886 oss << '@' << m_id.get();
974     break;
975     case G_BINARY:
976     oss << '(';
977     m_left->intoString(oss);
978     oss << ' ' << opToString(m_op) << ' ';
979     m_right->intoString(oss);
980     oss << ')';
981     break;
982     case G_UNARY:
983     oss << opToString(m_op) << '(';
984     m_left->intoString(oss);
985     oss << ')';
986     break;
987     default:
988     oss << "UNKNOWN";
989     }
990     }
991    
992 jfenwick 1865 // Note that in this case, deepCopy does not make copies of the leaves.
993     // Hopefully copy on write (or whatever we end up using) will take care of this.
994     DataAbstract*
995     DataLazy::deepCopy()
996     {
997     if (m_op==IDENTITY)
998     {
999     return new DataLazy(m_left); // we don't need to copy the child here
1000     }
1001     return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
1002     }
1003    
1004    
// Returns the cached total number of values the resolved data would contain.
// (m_length is presumably set when the node is constructed — confirm in the ctor.)
DataTypes::ValueType::size_type
DataLazy::getLength() const
{
  return m_length;
}
1010    
1011    
// Slicing is not supported on lazy data: callers must resolve first.
// Always throws DataException.
DataAbstract*
DataLazy::getSlice(const DataTypes::RegionType& region) const
{
  throw DataException("getSlice - not implemented for Lazy objects.");
}
1017    
// Point offsets are only meaningful on resolved (ready) data, so this is
// unimplemented for lazy objects. Always throws DataException.
DataTypes::ValueType::size_type
DataLazy::getPointOffset(int sampleNo,
                 int dataPointNo) const
{
  throw DataException("getPointOffset - not implemented for Lazy objects - yet.");
}
1024    
1025     } // end namespace

  ViewVC Help
Powered by ViewVC 1.1.26