/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp
ViewVC logotype

Contents of /branches/clazy/escriptcore/src/DataLazy.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2084 - (show annotations)
Fri Nov 21 05:20:42 2008 UTC (10 years, 5 months ago) by jfenwick
Original Path: trunk/escript/src/DataLazy.cpp
File size: 42745 byte(s)
Fixed a warning in cpp unit tests under dodebug
Pointed the url for python doco at shake200 rather than iservo.
Added support for trace and transpose to LazyData.
Fixed bug in trace to initialise running totals.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "DataLazy.h"
16 #ifdef USE_NETCDF
17 #include <netcdfcpp.h>
18 #endif
19 #ifdef PASO_MPI
20 #include <mpi.h>
21 #endif
22 #ifdef _OPENMP
23 #include <omp.h>
24 #endif
25 #include "FunctionSpace.h"
26 #include "DataTypes.h"
27 #include "Data.h"
28 #include "UnaryFuncs.h" // for escript::fsign
29 #include "Utils.h"
30
31 /*
32 How does DataLazy work?
33 ~~~~~~~~~~~~~~~~~~~~~~~
34
35 Each instance represents a single operation on one or two other DataLazy instances. These arguments are normally
36 denoted left and right.
37
38 A special operation, IDENTITY, stores an instance of DataReady in the m_id member.
39 This means that all "internal" nodes in the structure are instances of DataLazy.
40
41 Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
42 Note that IDENTITY is not considered a unary operation.
43
44 I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
45 It must however form a DAG (directed acyclic graph).
46 I will refer to individual DataLazy objects with the structure as nodes.
47
48 Each node also stores:
49 - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
50 evaluated.
51 - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
52 evaluate the expression.
53 - m_samplesize ~ the number of doubles stored in a sample.
54
55 When a new node is created, the above values are computed based on the values in the child nodes.
56 Eg: if left requires 4 samples and right requires 6 then left+right requires 7 samples.
57
58 The resolve method, which produces a DataReady from a DataLazy, does the following:
59 1) Create a DataReady to hold the new result.
60 2) Allocate a vector (v) big enough to hold m_buffsrequired samples.
61 3) For each sample, call resolveSample with v, to get its values and copy them into the result object.
62
63 (In the case of OMP, multiple samples are resolved in parallel so the vector needs to be larger.)
64
65 resolveSample returns a Vector* and an offset within that vector where the result is stored.
66 Normally, this would be v, but for identity nodes their internal vector is returned instead.
67
68 The convention that I use, is that the resolve methods should store their results starting at the offset they are passed.
69
70 For expressions which evaluate to Constant or Tagged, there is a different evaluation method.
71 The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.
72
73 To add a new operator you need to do the following (plus anything I might have forgotten):
74 1) Add to the ES_optype.
75 2) determine what opgroup your operation belongs to (X)
76 3) add a string for the op to the end of ES_opstrings
77 4) increase ES_opcount
78 5) add an entry (X) to opgroups
79 6) add an entry to the switch in collapseToReady
80 7) add an entry to resolveX
81 */
82
83
84 using namespace std;
85 using namespace boost;
86
87 namespace escript
88 {
89
90 namespace
91 {
92
93 enum ES_opgroup
94 {
95 G_UNKNOWN,
96 G_IDENTITY,
97 G_BINARY, // pointwise operations with two arguments
98 G_UNARY, // pointwise operations with one argument
99 G_NP1OUT, // non-pointwise op with one output
100 G_NP1OUT_P, // non-pointwise op with one output requiring a parameter
101 G_TENSORPROD // general tensor product
102 };
103
104
105
106
107 string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",
108 "sin","cos","tan",
109 "asin","acos","atan","sinh","cosh","tanh","erf",
110 "asinh","acosh","atanh",
111 "log10","log","sign","abs","neg","pos","exp","sqrt",
112 "1/","where>0","where<0","where>=0","where<=0",
113 "symmetric","nonsymmetric",
114 "prod",
115 "transpose",
116 "trace"};
117 int ES_opcount=38;
118 ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
119 G_UNARY,G_UNARY,G_UNARY, //10
120 G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 17
121 G_UNARY,G_UNARY,G_UNARY, // 20
122 G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 28
123 G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 33
124 G_NP1OUT,G_NP1OUT,
125 G_TENSORPROD,
126 G_NP1OUT_P, G_NP1OUT_P};
127 inline
128 ES_opgroup
129 getOpgroup(ES_optype op)
130 {
131 return opgroups[op];
132 }
133
134 // return the FunctionSpace of the result of "left op right"
135 FunctionSpace
136 resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
137 {
138 // perhaps this should call interpolate and throw or something?
139 // maybe we need an interpolate node -
140 // that way, if interpolate is required in any other op we can just throw a
141 // programming error exception.
142
143 FunctionSpace l=left->getFunctionSpace();
144 FunctionSpace r=right->getFunctionSpace();
145 if (l!=r)
146 {
147 if (r.probeInterpolation(l))
148 {
149 return l;
150 }
151 if (l.probeInterpolation(r))
152 {
153 return r;
154 }
155 throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
156 }
157 return l;
158 }
159
160 // return the shape of the result of "left op right"
161 // the shapes resulting from tensor product are more complex to compute so are worked out elsewhere
162 DataTypes::ShapeType
163 resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
164 {
165 if (left->getShape()!=right->getShape())
166 {
167 if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
168 {
169 throw DataException("Shapes not the name - shapes must match for (point)binary operations.");
170 }
171 if (left->getRank()==0) // we need to allow scalar * anything
172 {
173 return right->getShape();
174 }
175 if (right->getRank()==0)
176 {
177 return left->getShape();
178 }
179 throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
180 }
181 return left->getShape();
182 }
183
184 // return the shape for "op left"
185
186 DataTypes::ShapeType
187 resultShape(DataAbstract_ptr left, ES_optype op)
188 {
189 switch(op)
190 {
191 case TRANS:
192 return left->getShape();
193 break;
194 case TRACE:
195 return DataTypes::scalarShape;
196 break;
197 default:
198 cout << op << endl;
199 throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
200 }
201 }
202
203 // determine the output shape for the general tensor product operation
204 // the additional parameters return information required later for the product
205 // the majority of this code is copy pasted from C_General_Tensor_Product
206 DataTypes::ShapeType
207 GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
208 {
209
210 // Get rank and shape of inputs
211 int rank0 = left->getRank();
212 int rank1 = right->getRank();
213 const DataTypes::ShapeType& shape0 = left->getShape();
214 const DataTypes::ShapeType& shape1 = right->getShape();
215
216 // Prepare for the loops of the product and verify compatibility of shapes
217 int start0=0, start1=0;
218 if (transpose == 0) {}
219 else if (transpose == 1) { start0 = axis_offset; }
220 else if (transpose == 2) { start1 = rank1-axis_offset; }
221 else { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }
222
223
224 // Adjust the shapes for transpose
225 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
226 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
227 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
228 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
229
230 // Prepare for the loops of the product
231 SL=1, SM=1, SR=1;
232 for (int i=0; i<rank0-axis_offset; i++) {
233 SL *= tmpShape0[i];
234 }
235 for (int i=rank0-axis_offset; i<rank0; i++) {
236 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
237 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
238 }
239 SM *= tmpShape0[i];
240 }
241 for (int i=axis_offset; i<rank1; i++) {
242 SR *= tmpShape1[i];
243 }
244
245 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
246 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
247 { // block to limit the scope of out_index
248 int out_index=0;
249 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
250 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
251 }
252 return shape2;
253 }
254
255
256 // determine the number of points in the result of "left op right"
257 // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here
258 // size_t
259 // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
260 // {
261 // switch (getOpgroup(op))
262 // {
263 // case G_BINARY: return left->getLength();
264 // case G_UNARY: return left->getLength();
265 // case G_NP1OUT: return left->getLength();
266 // default:
267 // throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");
268 // }
269 // }
270
271 // determine the number of samples requires to evaluate an expression combining left and right
272 // NP1OUT needs an extra buffer because we can't write the answers over the top of the input.
273 // The same goes for G_TENSORPROD
274 int
275 calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
276 {
277 switch(getOpgroup(op))
278 {
279 case G_IDENTITY: return 1;
280 case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);
281 case G_UNARY: return max(left->getBuffsRequired(),1);
282 case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);
283 case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);
284 case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
285 default:
286 throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
287 }
288 }
289
290
291 } // end anonymous namespace
292
293
294
295 // Return a string representing the operation
296 const std::string&
297 opToString(ES_optype op)
298 {
299 if (op<0 || op>=ES_opcount)
300 {
301 op=UNKNOWNOP;
302 }
303 return ES_opstrings[op];
304 }
305
306
307 DataLazy::DataLazy(DataAbstract_ptr p)
308 : parent(p->getFunctionSpace(),p->getShape()),
309 m_op(IDENTITY),
310 m_axis_offset(0),
311 m_transpose(0),
312 m_SL(0), m_SM(0), m_SR(0)
313 {
314 if (p->isLazy())
315 {
316 // I don't want identity of Lazy.
317 // Question: Why would that be so bad?
318 // Answer: We assume that the child of ID is something we can call getVector on
319 throw DataException("Programmer error - attempt to create identity from a DataLazy.");
320 }
321 else
322 {
323 m_id=dynamic_pointer_cast<DataReady>(p);
324 if(p->isConstant()) {m_readytype='C';}
325 else if(p->isExpanded()) {m_readytype='E';}
326 else if (p->isTagged()) {m_readytype='T';}
327 else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}
328 }
329 m_buffsRequired=1;
330 m_samplesize=getNumDPPSample()*getNoValues();
331 m_maxsamplesize=m_samplesize;
332 cout << "(1)Lazy created with " << m_samplesize << endl;
333 }
334
335
336
337
338 DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
339 : parent(left->getFunctionSpace(),left->getShape()),
340 m_op(op),
341 m_axis_offset(0),
342 m_transpose(0),
343 m_SL(0), m_SM(0), m_SR(0)
344 {
345 if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))
346 {
347 throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
348 }
349
350 DataLazy_ptr lleft;
351 if (!left->isLazy())
352 {
353 lleft=DataLazy_ptr(new DataLazy(left));
354 }
355 else
356 {
357 lleft=dynamic_pointer_cast<DataLazy>(left);
358 }
359 m_readytype=lleft->m_readytype;
360 m_left=lleft;
361 m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
362 m_samplesize=getNumDPPSample()*getNoValues();
363 m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
364 }
365
366
367 // In this constructor we need to consider interpolation
368 DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
369 : parent(resultFS(left,right,op), resultShape(left,right,op)),
370 m_op(op),
371 m_SL(0), m_SM(0), m_SR(0)
372 {
373 if ((getOpgroup(op)!=G_BINARY))
374 {
375 throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
376 }
377
378 if (getFunctionSpace()!=left->getFunctionSpace()) // left needs to be interpolated
379 {
380 FunctionSpace fs=getFunctionSpace();
381 Data ltemp(left);
382 Data tmp(ltemp,fs);
383 left=tmp.borrowDataPtr();
384 }
385 if (getFunctionSpace()!=right->getFunctionSpace()) // right needs to be interpolated
386 {
387 Data tmp(Data(right),getFunctionSpace());
388 right=tmp.borrowDataPtr();
389 }
390 left->operandCheck(*right);
391
392 if (left->isLazy()) // the children need to be DataLazy. Wrap them in IDENTITY if required
393 {
394 m_left=dynamic_pointer_cast<DataLazy>(left);
395 }
396 else
397 {
398 m_left=DataLazy_ptr(new DataLazy(left));
399 }
400 if (right->isLazy())
401 {
402 m_right=dynamic_pointer_cast<DataLazy>(right);
403 }
404 else
405 {
406 m_right=DataLazy_ptr(new DataLazy(right));
407 }
408 char lt=m_left->m_readytype;
409 char rt=m_right->m_readytype;
410 if (lt=='E' || rt=='E')
411 {
412 m_readytype='E';
413 }
414 else if (lt=='T' || rt=='T')
415 {
416 m_readytype='T';
417 }
418 else
419 {
420 m_readytype='C';
421 }
422 m_samplesize=getNumDPPSample()*getNoValues();
423 m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());
424 m_buffsRequired=calcBuffs(m_left, m_right,m_op);
425 cout << "(3)Lazy created with " << m_samplesize << endl;
426 }
427
428 DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
429 : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
430 m_op(op),
431 m_axis_offset(axis_offset),
432 m_transpose(transpose)
433 {
434 if ((getOpgroup(op)!=G_TENSORPROD))
435 {
436 throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
437 }
438 if ((transpose>2) || (transpose<0))
439 {
440 throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
441 }
442 if (getFunctionSpace()!=left->getFunctionSpace()) // left needs to be interpolated
443 {
444 FunctionSpace fs=getFunctionSpace();
445 Data ltemp(left);
446 Data tmp(ltemp,fs);
447 left=tmp.borrowDataPtr();
448 }
449 if (getFunctionSpace()!=right->getFunctionSpace()) // right needs to be interpolated
450 {
451 Data tmp(Data(right),getFunctionSpace());
452 right=tmp.borrowDataPtr();
453 }
454 left->operandCheck(*right);
455
456 if (left->isLazy()) // the children need to be DataLazy. Wrap them in IDENTITY if required
457 {
458 m_left=dynamic_pointer_cast<DataLazy>(left);
459 }
460 else
461 {
462 m_left=DataLazy_ptr(new DataLazy(left));
463 }
464 if (right->isLazy())
465 {
466 m_right=dynamic_pointer_cast<DataLazy>(right);
467 }
468 else
469 {
470 m_right=DataLazy_ptr(new DataLazy(right));
471 }
472 char lt=m_left->m_readytype;
473 char rt=m_right->m_readytype;
474 if (lt=='E' || rt=='E')
475 {
476 m_readytype='E';
477 }
478 else if (lt=='T' || rt=='T')
479 {
480 m_readytype='T';
481 }
482 else
483 {
484 m_readytype='C';
485 }
486 m_samplesize=getNumDPPSample()*getNoValues();
487 m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());
488 m_buffsRequired=calcBuffs(m_left, m_right,m_op);
489 cout << "(4)Lazy created with " << m_samplesize << endl;
490 }
491
492
493 DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
494 : parent(left->getFunctionSpace(), resultShape(left,op)),
495 m_op(op),
496 m_axis_offset(axis_offset),
497 m_transpose(0)
498 {
499 if ((getOpgroup(op)!=G_NP1OUT_P))
500 {
501 throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
502 }
503 DataLazy_ptr lleft;
504 if (!left->isLazy())
505 {
506 lleft=DataLazy_ptr(new DataLazy(left));
507 }
508 else
509 {
510 lleft=dynamic_pointer_cast<DataLazy>(left);
511 }
512 m_readytype=lleft->m_readytype;
513 m_left=lleft;
514 m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
515 m_samplesize=getNumDPPSample()*getNoValues();
516 m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
517 cout << "(5)Lazy created with " << m_samplesize << endl;
518 }
519
520
521 DataLazy::~DataLazy()
522 {
523 }
524
525
526 int
527 DataLazy::getBuffsRequired() const
528 {
529 return m_buffsRequired;
530 }
531
532
533 size_t
534 DataLazy::getMaxSampleSize() const
535 {
536 return m_maxsamplesize;
537 }
538
539 /*
540 \brief Evaluates the expression using methods on Data.
541 This does the work for the collapse method.
542 For reasons of efficiency do not call this method on DataExpanded nodes.
543 */
544 DataReady_ptr
545 DataLazy::collapseToReady()
546 {
547 if (m_readytype=='E')
548 { // this is more an efficiency concern than anything else
549 throw DataException("Programmer Error - do not use collapse on Expanded data.");
550 }
551 if (m_op==IDENTITY)
552 {
553 return m_id;
554 }
555 DataReady_ptr pleft=m_left->collapseToReady();
556 Data left(pleft);
557 Data right;
558 if ((getOpgroup(m_op)==G_BINARY) || (getOpgroup(m_op)==G_TENSORPROD))
559 {
560 right=Data(m_right->collapseToReady());
561 }
562 Data result;
563 switch(m_op)
564 {
565 case ADD:
566 result=left+right;
567 break;
568 case SUB:
569 result=left-right;
570 break;
571 case MUL:
572 result=left*right;
573 break;
574 case DIV:
575 result=left/right;
576 break;
577 case SIN:
578 result=left.sin();
579 break;
580 case COS:
581 result=left.cos();
582 break;
583 case TAN:
584 result=left.tan();
585 break;
586 case ASIN:
587 result=left.asin();
588 break;
589 case ACOS:
590 result=left.acos();
591 break;
592 case ATAN:
593 result=left.atan();
594 break;
595 case SINH:
596 result=left.sinh();
597 break;
598 case COSH:
599 result=left.cosh();
600 break;
601 case TANH:
602 result=left.tanh();
603 break;
604 case ERF:
605 result=left.erf();
606 break;
607 case ASINH:
608 result=left.asinh();
609 break;
610 case ACOSH:
611 result=left.acosh();
612 break;
613 case ATANH:
614 result=left.atanh();
615 break;
616 case LOG10:
617 result=left.log10();
618 break;
619 case LOG:
620 result=left.log();
621 break;
622 case SIGN:
623 result=left.sign();
624 break;
625 case ABS:
626 result=left.abs();
627 break;
628 case NEG:
629 result=left.neg();
630 break;
631 case POS:
632 // it doesn't mean anything for delayed.
633 // it will just trigger a deep copy of the lazy object
634 throw DataException("Programmer error - POS not supported for lazy data.");
635 break;
636 case EXP:
637 result=left.exp();
638 break;
639 case SQRT:
640 result=left.sqrt();
641 break;
642 case RECIP:
643 result=left.oneOver();
644 break;
645 case GZ:
646 result=left.wherePositive();
647 break;
648 case LZ:
649 result=left.whereNegative();
650 break;
651 case GEZ:
652 result=left.whereNonNegative();
653 break;
654 case LEZ:
655 result=left.whereNonPositive();
656 break;
657 case SYM:
658 result=left.symmetric();
659 break;
660 case NSYM:
661 result=left.nonsymmetric();
662 break;
663 case PROD:
664 result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
665 break;
666 case TRANS:
667 result=left.transpose(m_axis_offset);
668 break;
669 case TRACE:
670 result=left.trace(m_axis_offset);
671 break;
672 default:
673 throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
674 }
675 return result.borrowReadyPtr();
676 }
677
678 /*
679 \brief Converts the DataLazy into an IDENTITY storing the value of the expression.
680 This method uses the original methods on the Data class to evaluate the expressions.
681 For this reason, it should not be used on DataExpanded instances. (To do so would defeat
682 the purpose of using DataLazy in the first place).
683 */
684 void
685 DataLazy::collapse()
686 {
687 if (m_op==IDENTITY)
688 {
689 return;
690 }
691 if (m_readytype=='E')
692 { // this is more an efficiency concern than anything else
693 throw DataException("Programmer Error - do not use collapse on Expanded data.");
694 }
695 m_id=collapseToReady();
696 m_op=IDENTITY;
697 }
698
699 /*
700 \brief Compute the value of the expression (unary operation) for the given sample.
701 \return Vector which stores the value of the subexpression for the given sample.
702 \param v A vector to store intermediate results.
703 \param offset Index in v to begin storing results.
704 \param sampleNo Sample number to evaluate.
705 \param roffset (output parameter) the offset in the return vector where the result begins.
706
707 The return value will be an existing vector so do not deallocate it.
708 If the result is stored in v it should be stored at the offset given.
709 Everything from offset to the end of v should be considered available for this method to use.
710 */
711 DataTypes::ValueType*
712 DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
713 {
714 // we assume that any collapsing has been done before we get here
715 // since we only have one argument we don't need to think about only
716 // processing single points.
717 if (m_readytype!='E')
718 {
719 throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
720 }
721 const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);
722 const double* left=&((*vleft)[roffset]);
723 double* result=&(v[offset]);
724 roffset=offset;
725 switch (m_op)
726 {
727 case SIN:
728 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);
729 break;
730 case COS:
731 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);
732 break;
733 case TAN:
734 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);
735 break;
736 case ASIN:
737 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);
738 break;
739 case ACOS:
740 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);
741 break;
742 case ATAN:
743 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);
744 break;
745 case SINH:
746 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);
747 break;
748 case COSH:
749 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);
750 break;
751 case TANH:
752 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);
753 break;
754 case ERF:
755 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
756 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
757 #else
758 tensor_unary_operation(m_samplesize, left, result, ::erf);
759 break;
760 #endif
761 case ASINH:
762 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
763 tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
764 #else
765 tensor_unary_operation(m_samplesize, left, result, ::asinh);
766 #endif
767 break;
768 case ACOSH:
769 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
770 tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
771 #else
772 tensor_unary_operation(m_samplesize, left, result, ::acosh);
773 #endif
774 break;
775 case ATANH:
776 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
777 tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
778 #else
779 tensor_unary_operation(m_samplesize, left, result, ::atanh);
780 #endif
781 break;
782 case LOG10:
783 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);
784 break;
785 case LOG:
786 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);
787 break;
788 case SIGN:
789 tensor_unary_operation(m_samplesize, left, result, escript::fsign);
790 break;
791 case ABS:
792 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);
793 break;
794 case NEG:
795 tensor_unary_operation(m_samplesize, left, result, negate<double>());
796 break;
797 case POS:
798 // it doesn't mean anything for delayed.
799 // it will just trigger a deep copy of the lazy object
800 throw DataException("Programmer error - POS not supported for lazy data.");
801 break;
802 case EXP:
803 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);
804 break;
805 case SQRT:
806 tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);
807 break;
808 case RECIP:
809 tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
810 break;
811 case GZ:
812 tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
813 break;
814 case LZ:
815 tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
816 break;
817 case GEZ:
818 tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
819 break;
820 case LEZ:
821 tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
822 break;
823
824 default:
825 throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
826 }
827 return &v;
828 }
829
830
831 /*
832 \brief Compute the value of the expression (unary operation) for the given sample.
833 \return Vector which stores the value of the subexpression for the given sample.
834 \param v A vector to store intermediate results.
835 \param offset Index in v to begin storing results.
836 \param sampleNo Sample number to evaluate.
837 \param roffset (output parameter) the offset in the return vector where the result begins.
838
839 The return value will be an existing vector so do not deallocate it.
840 If the result is stored in v it should be stored at the offset given.
841 Everything from offset to the end of v should be considered available for this method to use.
842 */
843 DataTypes::ValueType*
844 DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
845 {
846 // we assume that any collapsing has been done before we get here
847 // since we only have one argument we don't need to think about only
848 // processing single points.
849 if (m_readytype!='E')
850 {
851 throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");
852 }
853 // since we can't write the result over the input, we need a result offset further along
854 size_t subroffset=roffset+m_samplesize;
855 const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);
856 roffset=offset;
857 switch (m_op)
858 {
859 case SYM:
860 DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
861 break;
862 case NSYM:
863 DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
864 break;
865 default:
866 throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
867 }
868 return &v;
869 }
870
871 /*
872 \brief Compute the value of the expression (unary operation) for the given sample.
873 \return Vector which stores the value of the subexpression for the given sample.
874 \param v A vector to store intermediate results.
875 \param offset Index in v to begin storing results.
876 \param sampleNo Sample number to evaluate.
877 \param roffset (output parameter) the offset in the return vector where the result begins.
878
879 The return value will be an existing vector so do not deallocate it.
880 If the result is stored in v it should be stored at the offset given.
881 Everything from offset to the end of v should be considered available for this method to use.
882 */
883 DataTypes::ValueType*
884 DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
885 {
886 // we assume that any collapsing has been done before we get here
887 // since we only have one argument we don't need to think about only
888 // processing single points.
889 if (m_readytype!='E')
890 {
891 throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");
892 }
893 // since we can't write the result over the input, we need a result offset further along
894 size_t subroffset=roffset+m_samplesize;
895 const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);
896 roffset=offset;
897 switch (m_op)
898 {
899 case TRACE:
900 DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
901 break;
902 case TRANS:
903 DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
904 break;
905 default:
906 throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
907 }
908 return &v;
909 }
910
911
912 #define PROC_OP(TYPE,X) \
913 for (int i=0;i<steps;++i,resultp+=resultStep) \
914 { \
915 tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
916 lroffset+=leftStep; \
917 rroffset+=rightStep; \
918 }
919
920 /*
921 \brief Compute the value of the expression (binary operation) for the given sample.
922 \return Vector which stores the value of the subexpression for the given sample.
923 \param v A vector to store intermediate results.
924 \param offset Index in v to begin storing results.
925 \param sampleNo Sample number to evaluate.
926 \param roffset (output parameter) the offset in the return vector where the result begins.
927
928 The return value will be an existing vector so do not deallocate it.
929 If the result is stored in v it should be stored at the offset given.
930 Everything from offset to the end of v should be considered available for this method to use.
931 */
932 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
933 // have already been collapsed to IDENTITY. So we must have at least one expanded child.
934 // If both children are expanded, then we can process them in a single operation (we treat
935 // the whole sample as one big datapoint.
936 // If one of the children is not expanded, then we need to treat each point in the sample
937 // individually.
938 // There is an additional complication when scalar operations are considered.
939 // For example, 2+Vector.
940 // In this case each double within the point is treated individually
DataTypes::ValueType*
DataLazy::resolveBinary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
	// Evaluate both children for this sample, then combine them element-wise
	// with the operator stored in m_op, writing the answer into v at 'offset'.
	// NOTE(review): debug trace left enabled; this runs once per sample,
	// inside resolve()'s OpenMP loop -- presumably a development leftover.
  cout << "Resolve binary: " << toString() << endl;

  size_t lroffset=0, rroffset=0;	// offsets in the left and right result vectors
	// first work out which of the children are expanded
  bool leftExp=(m_left->m_readytype=='E');
  bool rightExp=(m_right->m_readytype=='E');
  if (!leftExp && !rightExp)
  {
	throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
  }
  bool leftScalar=(m_left->getRank()==0);
  bool rightScalar=(m_right->getRank()==0);
	// If both children are expanded the whole sample is processed as one big
	// datapoint; otherwise each datapoint is handled separately.
	// (The (!leftExp && !rightExp) half of this test is unreachable -- that
	// combination was excluded by the throw above.)
  bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));	// is processing in single step?
  int steps=(bigloops?1:getNumDPPSample());
  size_t chunksize=(bigloops? m_samplesize : getNoValues());	// if bigloops, pretend the whole sample is a datapoint
  if (m_left->getRank()!=m_right->getRank())	// need to deal with scalar * ? ops
  {
	if (!leftScalar && !rightScalar)
	{
	   throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
	}
	// scalar op non-scalar: fall back to one double at a time so the scalar
	// side can be pinned (step 0) while the other side advances
	steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());
	chunksize=1;	// for scalar
  }
	// step of 0 keeps a non-expanded or scalar operand fixed between chunks
  int leftStep=((leftExp && (!rightExp || rightScalar))? m_right->getNoValues() : 0);
  int rightStep=((rightExp && (!leftExp || leftScalar))? m_left->getNoValues() : 0);
  int resultStep=max(leftStep,rightStep);	// only one (at most) should be !=0
	// Get the values of sub-expressions.
	// The left child may use [offset, offset+m_samplesize) of v as scratch,
	// so the right child is given the region one sample further along.
  const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
  const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note
	// the right child starts further along.
  double* resultp=&(v[offset]);		// results are stored at the vector offset we recieved
  switch(m_op)
  {
    case ADD:
	PROC_OP(NO_ARG,plus<double>());
	break;
    case SUB:
	PROC_OP(NO_ARG,minus<double>());
	break;
    case MUL:
	PROC_OP(NO_ARG,multiplies<double>());
	break;
    case DIV:
	PROC_OP(NO_ARG,divides<double>());
	break;
    case POW:
	PROC_OP(double (double,double),::pow);
	break;
    default:
	throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
  }
	// the answer lives in v at the offset we were handed
  roffset=offset;
  return &v;
}
999
1000
1001 /*
1002 \brief Compute the value of the expression (tensor product) for the given sample.
1003 \return Vector which stores the value of the subexpression for the given sample.
1004 \param v A vector to store intermediate results.
1005 \param offset Index in v to begin storing results.
1006 \param sampleNo Sample number to evaluate.
1007 \param roffset (output parameter) the offset in the return vector where the result begins.
1008
1009 The return value will be an existing vector so do not deallocate it.
1010 If the result is stored in v it should be stored at the offset given.
1011 Everything from offset to the end of v should be considered available for this method to use.
1012 */
1013 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1014 // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1015 // unlike the other resolve helpers, we must treat these datapoints separately.
DataTypes::ValueType*
DataLazy::resolveTProd(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
	// Evaluate the tensor product for one sample, datapoint by datapoint.
	// NOTE(review): debug trace left enabled -- runs once per sample.
  cout << "Resolve TensorProduct: " << toString() << endl;

  size_t lroffset=0, rroffset=0;	// offsets in the left and right result vectors
	// first work out which of the children are expanded
  bool leftExp=(m_left->m_readytype=='E');
  bool rightExp=(m_right->m_readytype=='E');
  int steps=getNumDPPSample();
	// a step of 0 holds a non-expanded operand fixed while the other advances
  int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);
  int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);
  int resultStep=max(leftStep,rightStep);	// only one (at most) should be !=0
	// Get the values of sub-expressions (leave a gap of one sample for the result).
	// [offset, offset+m_samplesize) is reserved for our answer; the children
	// get the two sample-sized regions after it as scratch space.
  const ValueType* left=m_left->resolveSample(v,offset+m_samplesize,sampleNo,lroffset);
  const ValueType* right=m_right->resolveSample(v,offset+2*m_samplesize,sampleNo,rroffset);
  double* resultp=&(v[offset]);		// results are stored at the vector offset we recieved
  switch(m_op)
  {
    case PROD:
	// one matrix product per datapoint; m_SL/m_SM/m_SR are presumably the
	// left/middle/right product dimensions fixed when this node was built
	// -- confirm against the G_TENSORPROD constructor
	for (int i=0;i<steps;++i,resultp+=resultStep)
	{
	    const double *ptr_0 = &((*left)[lroffset]);
	    const double *ptr_1 = &((*right)[rroffset]);
	    matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
	    lroffset+=leftStep;
	    rroffset+=rightStep;
	}
	break;
    default:
	throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
  }
	// the answer lives in v at the offset we were handed
  roffset=offset;
  return &v;
}
1051
1052
1053
1054 /*
1055 \brief Compute the value of the expression for the given sample.
1056 \return Vector which stores the value of the subexpression for the given sample.
1057 \param v A vector to store intermediate results.
1058 \param offset Index in v to begin storing results.
1059 \param sampleNo Sample number to evaluate.
1060 \param roffset (output parameter) the offset in the return vector where the result begins.
1061
1062 The return value will be an existing vector so do not deallocate it.
1063 */
1064 // the vector and the offset are a place where the method could write its data if it wishes
1065 // it is not obligated to do so. For example, if it has its own storage already, it can use that.
1066 // Hence the return value to indicate where the data is actually stored.
1067 // Regardless, the storage should be assumed to be used, even if it isn't.
1068
1069 // the roffset is the offset within the returned vector where the data begins
1070 const DataTypes::ValueType*
1071 DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
1072 {
1073 cout << "Resolve sample " << toString() << endl;
1074 // collapse so we have a 'E' node or an IDENTITY for some other type
1075 if (m_readytype!='E' && m_op!=IDENTITY)
1076 {
1077 collapse();
1078 }
1079 if (m_op==IDENTITY)
1080 {
1081 const ValueType& vec=m_id->getVector();
1082 if (m_readytype=='C')
1083 {
1084 roffset=0;
1085 return &(vec);
1086 }
1087 roffset=m_id->getPointOffset(sampleNo, 0);
1088 return &(vec);
1089 }
1090 if (m_readytype!='E')
1091 {
1092 throw DataException("Programmer Error - Collapse did not produce an expanded node.");
1093 }
1094 switch (getOpgroup(m_op))
1095 {
1096 case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);
1097 case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
1098 case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);
1099 case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);
1100 case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);
1101 default:
1102 throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1103 }
1104 }
1105
1106
1107 // To simplify the memory management, all threads operate on one large vector, rather than one each.
1108 // Each sample is evaluated independently and copied into the result DataExpanded.
DataReady_ptr
DataLazy::resolve()
{
	// Evaluate the whole expression tree into a new DataExpanded.
	// Samples are computed independently (OpenMP), each thread writing into
	// its own contiguous slice of a single shared scratch vector, and the
	// per-sample answers are copied into the result.

	// NOTE(review): the cout/cerr tracing here and inside the parallel loop
	// will interleave between threads -- presumably debugging leftovers.
  cout << "Sample size=" << m_samplesize << endl;
  cout << "Buffers=" << m_buffsRequired << endl;

  if (m_readytype!='E')	// if the whole sub-expression is Constant or Tagged, then evaluate it normally
  {
    collapse();
  }
  if (m_op==IDENTITY)		// So a lazy expression of Constant or Tagged data will be returned here.
  {
    return m_id;
  }
	// from this point on we must have m_op!=IDENTITY and m_readytype=='E'
	// Each thread's slice must be big enough for the deepest chain of
	// intermediate sample buffers this expression requires (m_buffsRequired).
  size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired));	// Each thread needs to have enough
									// storage to evaluate its expression
  int numthreads=1;
#ifdef _OPENMP
  numthreads=getNumberOfThreads();
#endif
  ValueType v(numthreads*threadbuffersize);
  cout << "Buffer created with size=" << v.size() << endl;
  DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
  ValueType& resvec=result->getVector();
	// take ownership immediately so 'result' cannot leak if anything throws
  DataReady_ptr resptr=DataReady_ptr(result);
  int sample;
  size_t outoffset;		// offset in the output data
  int totalsamples=getNumSamples();
  const ValueType* res=0;	// Vector storing the answer
  size_t resoffset=0;		// where in the vector to find the answer
	// NOTE(review): resolveSample may call collapse() on shared child nodes;
	// whether that is safe to run concurrently here should be confirmed.
  #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)
  for (sample=0;sample<totalsamples;++sample)
  {
      cout << "################################# " << sample << endl;
#ifdef _OPENMP
	// each thread works in its own slice of v
      res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
#else
      res=resolveSample(v,0,sample,resoffset);   // res would normally be v, but not if its a single IDENTITY op.
#endif
	cerr << "-------------------------------- " << endl;
      outoffset=result->getPointOffset(sample,0);
	cerr << "offset=" << outoffset << endl;
      for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)	// copy values into the output vector
      {
	resvec[outoffset]=(*res)[resoffset];
      }
	cerr << "*********************************" << endl;
  }
  return resptr;
}
1161
1162 std::string
1163 DataLazy::toString() const
1164 {
1165 ostringstream oss;
1166 oss << "Lazy Data:";
1167 intoString(oss);
1168 return oss.str();
1169 }
1170
1171
1172 void
1173 DataLazy::intoString(ostringstream& oss) const
1174 {
1175 switch (getOpgroup(m_op))
1176 {
1177 case G_IDENTITY:
1178 if (m_id->isExpanded())
1179 {
1180 oss << "E";
1181 }
1182 else if (m_id->isTagged())
1183 {
1184 oss << "T";
1185 }
1186 else if (m_id->isConstant())
1187 {
1188 oss << "C";
1189 }
1190 else
1191 {
1192 oss << "?";
1193 }
1194 oss << '@' << m_id.get();
1195 break;
1196 case G_BINARY:
1197 oss << '(';
1198 m_left->intoString(oss);
1199 oss << ' ' << opToString(m_op) << ' ';
1200 m_right->intoString(oss);
1201 oss << ')';
1202 break;
1203 case G_UNARY:
1204 case G_NP1OUT:
1205 case G_NP1OUT_P:
1206 oss << opToString(m_op) << '(';
1207 m_left->intoString(oss);
1208 oss << ')';
1209 break;
1210 case G_TENSORPROD:
1211 oss << opToString(m_op) << '(';
1212 m_left->intoString(oss);
1213 oss << ", ";
1214 m_right->intoString(oss);
1215 oss << ')';
1216 break;
1217 default:
1218 oss << "UNKNOWN";
1219 }
1220 }
1221
1222 DataAbstract*
1223 DataLazy::deepCopy()
1224 {
1225 switch (getOpgroup(m_op))
1226 {
1227 case G_IDENTITY: return new DataLazy(m_id->deepCopy()->getPtr());
1228 case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
1229 case G_BINARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
1230 case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
1231 case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
1232 default:
1233 throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
1234 }
1235 }
1236
1237
1238 // There is no single, natural interpretation of getLength on DataLazy.
1239 // Instances of DataReady can look at the size of their vectors.
1240 // For lazy though, it could be the size the data would be if it were resolved;
1241 // or it could be some function of the lengths of the DataReady instances which
1242 // form part of the expression.
1243 // Rather than have people making assumptions, I have disabled the method.
DataTypes::ValueType::size_type
DataLazy::getLength() const
{
	// Deliberately unsupported for lazy data -- see the rationale above.
  throw DataException("getLength() does not make sense for lazy data.");
}
1249
1250
DataAbstract*
DataLazy::getSlice(const DataTypes::RegionType& region) const
{
	// Slicing is not implemented on lazy expressions; callers must resolve first.
  throw DataException("getSlice - not implemented for Lazy objects.");
}
1256
1257
1258 // To do this we need to rely on our child nodes
1259 DataTypes::ValueType::size_type
1260 DataLazy::getPointOffset(int sampleNo,
1261 int dataPointNo)
1262 {
1263 if (m_op==IDENTITY)
1264 {
1265 return m_id->getPointOffset(sampleNo,dataPointNo);
1266 }
1267 if (m_readytype!='E')
1268 {
1269 collapse();
1270 return m_id->getPointOffset(sampleNo,dataPointNo);
1271 }
1272 // at this point we do not have an identity node and the expression will be Expanded
1273 // so we only need to know which child to ask
1274 if (m_left->m_readytype=='E')
1275 {
1276 return m_left->getPointOffset(sampleNo,dataPointNo);
1277 }
1278 else
1279 {
1280 return m_right->getPointOffset(sampleNo,dataPointNo);
1281 }
1282 }
1283
1284 // To do this we need to rely on our child nodes
1285 DataTypes::ValueType::size_type
1286 DataLazy::getPointOffset(int sampleNo,
1287 int dataPointNo) const
1288 {
1289 if (m_op==IDENTITY)
1290 {
1291 return m_id->getPointOffset(sampleNo,dataPointNo);
1292 }
1293 if (m_readytype=='E')
1294 {
1295 // at this point we do not have an identity node and the expression will be Expanded
1296 // so we only need to know which child to ask
1297 if (m_left->m_readytype=='E')
1298 {
1299 return m_left->getPointOffset(sampleNo,dataPointNo);
1300 }
1301 else
1302 {
1303 return m_right->getPointOffset(sampleNo,dataPointNo);
1304 }
1305 }
1306 if (m_readytype=='C')
1307 {
1308 return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
1309 }
1310 throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
1311 }
1312
1313 // It would seem that DataTagged will need to be treated differently since even after setting all tags
1314 // to zero, all the tags from all the DataTags would be in the result.
1315 // However since they all have the same value (0) whether they are there or not should not matter.
1316 // So I have decided that for all types this method will create a constant 0.
1317 // It can be promoted up as required.
1318 // A possible efficiency concern might be expanded->constant->expanded which has an extra memory management
1319 // but we can deal with that if it arrises.
1320 void
1321 DataLazy::setToZero()
1322 {
1323 DataTypes::ValueType v(getNoValues(),0);
1324 m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
1325 m_op=IDENTITY;
1326 m_right.reset();
1327 m_left.reset();
1328 m_readytype='C';
1329 m_buffsRequired=1;
1330 }
1331
1332 } // end namespace

  ViewVC Help
Powered by ViewVC 1.1.26