/[escript]/branches/clazy/escriptcore/src/DataLazy.cpp
ViewVC logotype

Contents of /branches/clazy/escriptcore/src/DataLazy.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2085 - (show annotations)
Mon Nov 24 00:45:48 2008 UTC (10 years, 4 months ago) by jfenwick
Original Path: trunk/escript/src/DataLazy.cpp
File size: 42860 byte(s)
Added c++ unit tests for new operations.
Added resolve to some operations in Data

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "DataLazy.h"
16 #ifdef USE_NETCDF
17 #include <netcdfcpp.h>
18 #endif
19 #ifdef PASO_MPI
20 #include <mpi.h>
21 #endif
22 #ifdef _OPENMP
23 #include <omp.h>
24 #endif
25 #include "FunctionSpace.h"
26 #include "DataTypes.h"
27 #include "Data.h"
28 #include "UnaryFuncs.h" // for escript::fsign
29 #include "Utils.h"
30
31 /*
32 How does DataLazy work?
33 ~~~~~~~~~~~~~~~~~~~~~~~
34
35 Each instance represents a single operation on one or two other DataLazy instances. These arguments are normally
36 denoted left and right.
37
38 A special operation, IDENTITY, stores an instance of DataReady in the m_id member.
39 This means that all "internal" nodes in the structure are instances of DataLazy.
40
41 Each operation has a string representation as well as an opgroup - eg G_IDENTITY, G_BINARY, ...
42 Note that IDENTITY is not considered a unary operation.
43
44 I am avoiding calling the structure formed a tree because it is not guaranteed to be one (eg c=a+a).
45 It must however form a DAG (directed acyclic graph).
46 I will refer to individual DataLazy objects with the structure as nodes.
47
48 Each node also stores:
49 - m_readytype \in {'E','T','C','?'} ~ indicates what sort of DataReady would be produced if the expression was
50 evaluated.
51 - m_buffsrequired ~ the largest number of samples which would need to be kept simultaneously in order to
52 evaluate the expression.
53 - m_samplesize ~ the number of doubles stored in a sample.
54
55 When a new node is created, the above values are computed based on the values in the child nodes.
56 Eg: if left requires 4 samples and right requires 6 then left+right requires 7 samples.
57
58 The resolve method, which produces a DataReady from a DataLazy, does the following:
59 1) Create a DataReady to hold the new result.
60 2) Allocate a vector (v) big enough to hold m_buffsrequired samples.
61 3) For each sample, call resolveSample with v, to get its values and copy them into the result object.
62
63 (In the case of OMP, multiple samples are resolved in parallel so the vector needs to be larger.)
64
65 resolveSample returns a Vector* and an offset within that vector where the result is stored.
66 Normally, this would be v, but for identity nodes their internal vector is returned instead.
67
68 The convention that I use, is that the resolve methods should store their results starting at the offset they are passed.
69
70 For expressions which evaluate to Constant or Tagged, there is a different evaluation method.
71 The collapse method invokes the (non-lazy) operations on the Data class to evaluate the expression.
72
73 To add a new operator you need to do the following (plus anything I might have forgotten):
74 1) Add to the ES_optype.
75 2) determine what opgroup your operation belongs to (X)
76 3) add a string for the op to the end of ES_opstrings
77 4) increase ES_opcount
78 5) add an entry (X) to opgroups
79 6) add an entry to the switch in collapseToReady
80 7) add an entry to resolveX
81 */
82
83
84 using namespace std;
85 using namespace boost;
86
87 namespace escript
88 {
89
90 namespace
91 {
92
93 enum ES_opgroup
94 {
95 G_UNKNOWN,
96 G_IDENTITY,
97 G_BINARY, // pointwise operations with two arguments
98 G_UNARY, // pointwise operations with one argument
99 G_NP1OUT, // non-pointwise op with one output
100 G_NP1OUT_P, // non-pointwise op with one output requiring a parameter
101 G_TENSORPROD // general tensor product
102 };
103
104
105
106
107 string ES_opstrings[]={"UNKNOWN","IDENTITY","+","-","*","/","^",
108 "sin","cos","tan",
109 "asin","acos","atan","sinh","cosh","tanh","erf",
110 "asinh","acosh","atanh",
111 "log10","log","sign","abs","neg","pos","exp","sqrt",
112 "1/","where>0","where<0","where>=0","where<=0",
113 "symmetric","nonsymmetric",
114 "prod",
115 "transpose",
116 "trace"};
117 int ES_opcount=38;
118 ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY,
119 G_UNARY,G_UNARY,G_UNARY, //10
120 G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 17
121 G_UNARY,G_UNARY,G_UNARY, // 20
122 G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 28
123 G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 33
124 G_NP1OUT,G_NP1OUT,
125 G_TENSORPROD,
126 G_NP1OUT_P, G_NP1OUT_P};
127 inline
128 ES_opgroup
129 getOpgroup(ES_optype op)
130 {
131 return opgroups[op];
132 }
133
134 // return the FunctionSpace of the result of "left op right"
135 FunctionSpace
136 resultFS(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
137 {
138 // perhaps this should call interpolate and throw or something?
139 // maybe we need an interpolate node -
140 // that way, if interpolate is required in any other op we can just throw a
141 // programming error exception.
142
143 FunctionSpace l=left->getFunctionSpace();
144 FunctionSpace r=right->getFunctionSpace();
145 if (l!=r)
146 {
147 if (r.probeInterpolation(l))
148 {
149 return l;
150 }
151 if (l.probeInterpolation(r))
152 {
153 return r;
154 }
155 throw DataException("Cannot interpolate between the FunctionSpaces given for operation "+opToString(op)+".");
156 }
157 return l;
158 }
159
160 // return the shape of the result of "left op right"
161 // the shapes resulting from tensor product are more complex to compute so are worked out elsewhere
162 DataTypes::ShapeType
163 resultShape(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
164 {
165 if (left->getShape()!=right->getShape())
166 {
167 if ((getOpgroup(op)!=G_BINARY) && (getOpgroup(op)!=G_NP1OUT))
168 {
169 throw DataException("Shapes not the name - shapes must match for (point)binary operations.");
170 }
171 if (left->getRank()==0) // we need to allow scalar * anything
172 {
173 return right->getShape();
174 }
175 if (right->getRank()==0)
176 {
177 return left->getShape();
178 }
179 throw DataException("Shapes not the same - arguments must have matching shapes (or be scalars) for (point)binary operations on lazy data.");
180 }
181 return left->getShape();
182 }
183
184 // return the shape for "op left"
185
186 DataTypes::ShapeType
187 resultShape(DataAbstract_ptr left, ES_optype op)
188 {
189 switch(op)
190 {
191 case TRANS:
192 return left->getShape();
193 break;
194 case TRACE:
195 return DataTypes::scalarShape;
196 break;
197 default:
198 throw DataException("Programmer error - resultShape(left,op) can't compute shapes for operator "+opToString(op)+".");
199 }
200 }
201
// determine the output shape for the general tensor product operation
// the additional parameters (SL, SM, SR) return loop-extent information required
// later when the product is actually evaluated:
//   SL = product of the left operand's non-contracted extents,
//   SM = product of the contracted ("middle") extents,
//   SR = product of the right operand's non-contracted extents.
// the majority of this code is copy pasted from C_General_Tensor_Product
DataTypes::ShapeType
GTPShape(DataAbstract_ptr left, DataAbstract_ptr right, int axis_offset, int transpose, int& SL, int& SM, int& SR)
{

  // Get rank and shape of inputs
  int rank0 = left->getRank();
  int rank1 = right->getRank();
  const DataTypes::ShapeType& shape0 = left->getShape();
  const DataTypes::ShapeType& shape1 = right->getShape();

  // Prepare for the loops of the product and verify compatibility of shapes
  // transpose selects which operand (if either) has its axes rotated before the product.
  int start0=0, start1=0;
  if (transpose == 0) {}
  else if (transpose == 1) { start0 = axis_offset; }
  else if (transpose == 2) { start1 = rank1-axis_offset; }
  else { throw DataException("DataLazy GeneralTensorProduct Constructor: Error - transpose should be 0, 1 or 2"); }

  // NOTE(review): only rank0 is checked against axis_offset; rank1 < axis_offset
  // would make the loops below misbehave -- confirm callers guarantee it.
  if (rank0<axis_offset)
  {
	throw DataException("DataLazy GeneralTensorProduct Constructor: Error - rank of left < axisoffset");
  }

  // Adjust the shapes for transpose (cyclic rotation by start0/start1)
  DataTypes::ShapeType tmpShape0(rank0);	// pre-sizing the vectors rather
  DataTypes::ShapeType tmpShape1(rank1);	// than using push_back
  for (int i=0; i<rank0; i++)	{ tmpShape0[i]=shape0[(i+start0)%rank0]; }
  for (int i=0; i<rank1; i++)	{ tmpShape1[i]=shape1[(i+start1)%rank1]; }

  // Prepare for the loops of the product
  SL=1, SM=1, SR=1;
  for (int i=0; i<rank0-axis_offset; i++)	{
	SL *= tmpShape0[i];
  }
  // the last axis_offset extents of left must match the first axis_offset of right
  for (int i=rank0-axis_offset; i<rank0; i++)	{
	if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
	  throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
	}
	SM *= tmpShape0[i];
  }
  for (int i=axis_offset; i<rank1; i++)	{
	SR *= tmpShape1[i];
  }

  // Define the shape of the output (rank of shape is the sum of the loop ranges below)
  DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
  {	// block to limit the scope of out_index
	int out_index=0;
	for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
	for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
  }
  return shape2;
}
257
258
259 // determine the number of points in the result of "left op right"
260 // note that determining the resultLength for G_TENSORPROD is more complex and will not be processed here
261 // size_t
262 // resultLength(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
263 // {
264 // switch (getOpgroup(op))
265 // {
266 // case G_BINARY: return left->getLength();
267 // case G_UNARY: return left->getLength();
268 // case G_NP1OUT: return left->getLength();
269 // default:
270 // throw DataException("Programmer Error - attempt to getLength() for operator "+opToString(op)+".");
271 // }
272 // }
273
// determine the number of sample-sized buffers required to evaluate an expression
// combining left and right.
// NP1OUT needs an extra buffer because we can't write the answers over the top of the input.
// The same goes for G_TENSORPROD.
// For G_BINARY the right child resolves into a buffer one slot further along,
// hence the +1 on the right operand's requirement.
int
calcBuffs(const DataLazy_ptr& left, const DataLazy_ptr& right, ES_optype op)
{
   switch(getOpgroup(op))
   {
   case G_IDENTITY: return 1;
   case G_BINARY: return max(left->getBuffsRequired(),right->getBuffsRequired()+1);
   case G_UNARY: return max(left->getBuffsRequired(),1);
   case G_NP1OUT: return 1+max(left->getBuffsRequired(),1);
   case G_NP1OUT_P: return 1+max(left->getBuffsRequired(),1);
   case G_TENSORPROD: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1);
   default:
	throw DataException("Programmer Error - attempt to calcBuffs() for operator "+opToString(op)+".");
   }
}
292
293
294 } // end anonymous namespace
295
296
297
298 // Return a string representing the operation
299 const std::string&
300 opToString(ES_optype op)
301 {
302 if (op<0 || op>=ES_opcount)
303 {
304 op=UNKNOWNOP;
305 }
306 return ES_opstrings[op];
307 }
308
309
310 DataLazy::DataLazy(DataAbstract_ptr p)
311 : parent(p->getFunctionSpace(),p->getShape()),
312 m_op(IDENTITY),
313 m_axis_offset(0),
314 m_transpose(0),
315 m_SL(0), m_SM(0), m_SR(0)
316 {
317 if (p->isLazy())
318 {
319 // I don't want identity of Lazy.
320 // Question: Why would that be so bad?
321 // Answer: We assume that the child of ID is something we can call getVector on
322 throw DataException("Programmer error - attempt to create identity from a DataLazy.");
323 }
324 else
325 {
326 m_id=dynamic_pointer_cast<DataReady>(p);
327 if(p->isConstant()) {m_readytype='C';}
328 else if(p->isExpanded()) {m_readytype='E';}
329 else if (p->isTagged()) {m_readytype='T';}
330 else {throw DataException("Unknown DataReady instance in DataLazy constructor.");}
331 }
332 m_buffsRequired=1;
333 m_samplesize=getNumDPPSample()*getNoValues();
334 m_maxsamplesize=m_samplesize;
335 cout << "(1)Lazy created with " << m_samplesize << endl;
336 }
337
338
339
340
// Construct a node applying a unary (or single-output non-pointwise) operation
// to 'left'. Non-lazy arguments are wrapped in an IDENTITY node first.
// The result type ('E'/'T'/'C') is inherited from the child.
DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op)
	: parent(left->getFunctionSpace(),left->getShape()),
	m_op(op),
	m_axis_offset(0),
	m_transpose(0),
	m_SL(0), m_SM(0), m_SR(0)
{
   if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT))
   {
	throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations.");
   }

   DataLazy_ptr lleft;
   if (!left->isLazy())
   {
	lleft=DataLazy_ptr(new DataLazy(left));		// wrap ready data in IDENTITY
   }
   else
   {
	lleft=dynamic_pointer_cast<DataLazy>(left);
   }
   m_readytype=lleft->m_readytype;
   m_left=lleft;
   m_buffsRequired=calcBuffs(m_left, m_right,m_op);	// yeah m_right will be null at this point
   m_samplesize=getNumDPPSample()*getNoValues();
   m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
}
368
369
370 // In this constructor we need to consider interpolation
371 DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op)
372 : parent(resultFS(left,right,op), resultShape(left,right,op)),
373 m_op(op),
374 m_SL(0), m_SM(0), m_SR(0)
375 {
376 if ((getOpgroup(op)!=G_BINARY))
377 {
378 throw DataException("Programmer error - constructor DataLazy(left, right, op) will only process BINARY operations.");
379 }
380
381 if (getFunctionSpace()!=left->getFunctionSpace()) // left needs to be interpolated
382 {
383 FunctionSpace fs=getFunctionSpace();
384 Data ltemp(left);
385 Data tmp(ltemp,fs);
386 left=tmp.borrowDataPtr();
387 }
388 if (getFunctionSpace()!=right->getFunctionSpace()) // right needs to be interpolated
389 {
390 Data tmp(Data(right),getFunctionSpace());
391 right=tmp.borrowDataPtr();
392 }
393 left->operandCheck(*right);
394
395 if (left->isLazy()) // the children need to be DataLazy. Wrap them in IDENTITY if required
396 {
397 m_left=dynamic_pointer_cast<DataLazy>(left);
398 }
399 else
400 {
401 m_left=DataLazy_ptr(new DataLazy(left));
402 }
403 if (right->isLazy())
404 {
405 m_right=dynamic_pointer_cast<DataLazy>(right);
406 }
407 else
408 {
409 m_right=DataLazy_ptr(new DataLazy(right));
410 }
411 char lt=m_left->m_readytype;
412 char rt=m_right->m_readytype;
413 if (lt=='E' || rt=='E')
414 {
415 m_readytype='E';
416 }
417 else if (lt=='T' || rt=='T')
418 {
419 m_readytype='T';
420 }
421 else
422 {
423 m_readytype='C';
424 }
425 m_samplesize=getNumDPPSample()*getNoValues();
426 m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());
427 m_buffsRequired=calcBuffs(m_left, m_right,m_op);
428 cout << "(3)Lazy created with " << m_samplesize << endl;
429 }
430
431 DataLazy::DataLazy(DataAbstract_ptr left, DataAbstract_ptr right, ES_optype op, int axis_offset, int transpose)
432 : parent(resultFS(left,right,op), GTPShape(left,right, axis_offset, transpose, m_SL,m_SM, m_SR)),
433 m_op(op),
434 m_axis_offset(axis_offset),
435 m_transpose(transpose)
436 {
437 if ((getOpgroup(op)!=G_TENSORPROD))
438 {
439 throw DataException("Programmer error - constructor DataLazy(left, right, op, ax, tr) will only process BINARY operations which require parameters.");
440 }
441 if ((transpose>2) || (transpose<0))
442 {
443 throw DataException("DataLazy GeneralTensorProduct constructor: Error - transpose should be 0, 1 or 2");
444 }
445 if (getFunctionSpace()!=left->getFunctionSpace()) // left needs to be interpolated
446 {
447 FunctionSpace fs=getFunctionSpace();
448 Data ltemp(left);
449 Data tmp(ltemp,fs);
450 left=tmp.borrowDataPtr();
451 }
452 if (getFunctionSpace()!=right->getFunctionSpace()) // right needs to be interpolated
453 {
454 Data tmp(Data(right),getFunctionSpace());
455 right=tmp.borrowDataPtr();
456 }
457 left->operandCheck(*right);
458
459 if (left->isLazy()) // the children need to be DataLazy. Wrap them in IDENTITY if required
460 {
461 m_left=dynamic_pointer_cast<DataLazy>(left);
462 }
463 else
464 {
465 m_left=DataLazy_ptr(new DataLazy(left));
466 }
467 if (right->isLazy())
468 {
469 m_right=dynamic_pointer_cast<DataLazy>(right);
470 }
471 else
472 {
473 m_right=DataLazy_ptr(new DataLazy(right));
474 }
475 char lt=m_left->m_readytype;
476 char rt=m_right->m_readytype;
477 if (lt=='E' || rt=='E')
478 {
479 m_readytype='E';
480 }
481 else if (lt=='T' || rt=='T')
482 {
483 m_readytype='T';
484 }
485 else
486 {
487 m_readytype='C';
488 }
489 m_samplesize=getNumDPPSample()*getNoValues();
490 m_maxsamplesize=max(max(m_samplesize,m_right->getMaxSampleSize()),m_left->getMaxSampleSize());
491 m_buffsRequired=calcBuffs(m_left, m_right,m_op);
492 cout << "(4)Lazy created with " << m_samplesize << endl;
493 }
494
495
496 DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op, int axis_offset)
497 : parent(left->getFunctionSpace(), resultShape(left,op)),
498 m_op(op),
499 m_axis_offset(axis_offset),
500 m_transpose(0)
501 {
502 if ((getOpgroup(op)!=G_NP1OUT_P))
503 {
504 throw DataException("Programmer error - constructor DataLazy(left, op, ax) will only process UNARY operations which require parameters.");
505 }
506 DataLazy_ptr lleft;
507 if (!left->isLazy())
508 {
509 lleft=DataLazy_ptr(new DataLazy(left));
510 }
511 else
512 {
513 lleft=dynamic_pointer_cast<DataLazy>(left);
514 }
515 m_readytype=lleft->m_readytype;
516 m_left=lleft;
517 m_buffsRequired=calcBuffs(m_left, m_right,m_op); // yeah m_right will be null at this point
518 m_samplesize=getNumDPPSample()*getNoValues();
519 m_maxsamplesize=max(m_samplesize,m_left->getMaxSampleSize());
520 cout << "(5)Lazy created with " << m_samplesize << endl;
521 }
522
523
// Destructor: nothing to release explicitly; the smart pointers
// (m_id, m_left, m_right) clean up automatically.
DataLazy::~DataLazy()
{
}
527
528
// Return the number of sample-sized buffers needed to evaluate this
// expression (computed by calcBuffs at construction time).
int
DataLazy::getBuffsRequired() const
{
	return m_buffsRequired;
}
534
535
// Return the largest sample size (in doubles) anywhere in this expression's DAG.
size_t
DataLazy::getMaxSampleSize() const
{
	return m_maxsamplesize;
}
541
/*
  \brief Evaluates the expression using methods on Data.
  This does the work for the collapse method.
  For reasons of efficiency do not call this method on DataExpanded nodes.
  Recursively collapses the children first, then applies this node's operator
  via the corresponding (non-lazy) Data member function.
*/
DataReady_ptr
DataLazy::collapseToReady()
{
  if (m_readytype=='E')
  {	// this is more an efficiency concern than anything else
	throw DataException("Programmer Error - do not use collapse on Expanded data.");
  }
  if (m_op==IDENTITY)
  {
	return m_id;		// leaf node: already holds a DataReady
  }
  DataReady_ptr pleft=m_left->collapseToReady();
  Data left(pleft);
  Data right;
  // only two-argument groups have a right child to collapse
  if ((getOpgroup(m_op)==G_BINARY) || (getOpgroup(m_op)==G_TENSORPROD))
  {
	right=Data(m_right->collapseToReady());
  }
  Data result;
  // NOTE(review): there is no case for the power operator ("^" in ES_opstrings);
  // presumably it cannot reach here -- confirm, otherwise it hits the default throw.
  switch(m_op)
  {
    case ADD:
	result=left+right;
	break;
    case SUB:
	result=left-right;
	break;
    case MUL:
	result=left*right;
	break;
    case DIV:
	result=left/right;
	break;
    case SIN:
	result=left.sin();
	break;
    case COS:
	result=left.cos();
	break;
    case TAN:
	result=left.tan();
	break;
    case ASIN:
	result=left.asin();
	break;
    case ACOS:
	result=left.acos();
	break;
    case ATAN:
	result=left.atan();
	break;
    case SINH:
	result=left.sinh();
	break;
    case COSH:
	result=left.cosh();
	break;
    case TANH:
	result=left.tanh();
	break;
    case ERF:
	result=left.erf();
	break;
    case ASINH:
	result=left.asinh();
	break;
    case ACOSH:
	result=left.acosh();
	break;
    case ATANH:
	result=left.atanh();
	break;
    case LOG10:
	result=left.log10();
	break;
    case LOG:
	result=left.log();
	break;
    case SIGN:
	result=left.sign();
	break;
    case ABS:
	result=left.abs();
	break;
    case NEG:
	result=left.neg();
	break;
    case POS:
	// it doesn't mean anything for delayed.
	// it will just trigger a deep copy of the lazy object
	throw DataException("Programmer error - POS not supported for lazy data.");
	break;
    case EXP:
	result=left.exp();
	break;
    case SQRT:
	result=left.sqrt();
	break;
    case RECIP:
	result=left.oneOver();
	break;
    case GZ:
	result=left.wherePositive();
	break;
    case LZ:
	result=left.whereNegative();
	break;
    case GEZ:
	result=left.whereNonNegative();
	break;
    case LEZ:
	result=left.whereNonPositive();
	break;
    case SYM:
	result=left.symmetric();
	break;
    case NSYM:
	result=left.nonsymmetric();
	break;
    case PROD:
	result=C_GeneralTensorProduct(left,right,m_axis_offset, m_transpose);
	break;
    case TRANS:
	result=left.transpose(m_axis_offset);
	break;
    case TRACE:
	result=left.trace(m_axis_offset);
	break;
    default:
	throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+".");
  }
  return result.borrowReadyPtr();
}
680
681 /*
682 \brief Converts the DataLazy into an IDENTITY storing the value of the expression.
683 This method uses the original methods on the Data class to evaluate the expressions.
684 For this reason, it should not be used on DataExpanded instances. (To do so would defeat
685 the purpose of using DataLazy in the first place).
686 */
687 void
688 DataLazy::collapse()
689 {
690 if (m_op==IDENTITY)
691 {
692 return;
693 }
694 if (m_readytype=='E')
695 { // this is more an efficiency concern than anything else
696 throw DataException("Programmer Error - do not use collapse on Expanded data.");
697 }
698 m_id=collapseToReady();
699 m_op=IDENTITY;
700 }
701
/*
  \brief Compute the value of the expression (unary operation) for the given sample.
  \return Vector which stores the value of the subexpression for the given sample.
  \param v A vector to store intermediate results.
  \param offset Index in v to begin storing results.
  \param sampleNo Sample number to evaluate.
  \param roffset (output parameter) the offset in the return vector where the result begins.

  The return value will be an existing vector so do not deallocate it.
  If the result is stored in v it should be stored at the offset given.
  Everything from offset to the end of v should be considered available for this method to use.
*/
DataTypes::ValueType*
DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
	// we assume that any collapsing has been done before we get here
	// since we only have one argument we don't need to think about only
	// processing single points.
  if (m_readytype!='E')
  {
	throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
  }
  // resolve the child first; 'left' then points at its values and 'result'
  // at the slot where this node writes its output
  const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);
  const double* left=&((*vleft)[roffset]);
  double* result=&(v[offset]);
  roffset=offset;		// by convention the result starts at the offset we were given
  switch (m_op)
  {
    case SIN:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sin);
	break;
    case COS:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cos);
	break;
    case TAN:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tan);
	break;
    case ASIN:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::asin);
	break;
    case ACOS:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::acos);
	break;
    case ATAN:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::atan);
	break;
    case SINH:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sinh);
	break;
    case COSH:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::cosh);
	break;
    case TANH:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::tanh);
	break;
    case ERF:
	// erf is unavailable in the MSVC runtime; Intel's compiler provides it
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
	throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
	tensor_unary_operation(m_samplesize, left, result, ::erf);
	break;
#endif
    case ASINH:
	// inverse hyperbolics also need substitutes on MSVC (see above)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
	tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
#else
	tensor_unary_operation(m_samplesize, left, result, ::asinh);
#endif
	break;
    case ACOSH:
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
	tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
#else
	tensor_unary_operation(m_samplesize, left, result, ::acosh);
#endif
	break;
    case ATANH:
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
	tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
#else
	tensor_unary_operation(m_samplesize, left, result, ::atanh);
#endif
	break;
    case LOG10:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log10);
	break;
    case LOG:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::log);
	break;
    case SIGN:
	tensor_unary_operation(m_samplesize, left, result, escript::fsign);
	break;
    case ABS:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::fabs);
	break;
    case NEG:
	tensor_unary_operation(m_samplesize, left, result, negate<double>());
	break;
    case POS:
	// it doesn't mean anything for delayed.
	// it will just trigger a deep copy of the lazy object
	throw DataException("Programmer error - POS not supported for lazy data.");
	break;
    case EXP:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::exp);
	break;
    case SQRT:
	tensor_unary_operation<double (*)(double)>(m_samplesize, left, result, ::sqrt);
	break;
    case RECIP:
	tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
	break;
    case GZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
	break;
    case LZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
	break;
    case GEZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
	break;
    case LEZ:
	tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
	break;

    default:
	throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
  }
  return &v;
}
832
833
/*
  \brief Compute the value of the expression (non-pointwise unary operation:
  SYM or NSYM) for the given sample.
  \return Vector which stores the value of the subexpression for the given sample.
  \param v A vector to store intermediate results.
  \param offset Index in v to begin storing results.
  \param sampleNo Sample number to evaluate.
  \param roffset (output parameter) the offset in the return vector where the result begins.

  The return value will be an existing vector so do not deallocate it.
  If the result is stored in v it should be stored at the offset given.
  Everything from offset to the end of v should be considered available for this method to use.
*/
DataTypes::ValueType*
DataLazy::resolveNP1OUT(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
	// we assume that any collapsing has been done before we get here
	// since we only have one argument we don't need to think about only
	// processing single points.
  if (m_readytype!='E')
  {
	throw DataException("Programmer error - resolveNP1OUT should only be called on expanded Data.");
  }
	// since we can't write the result over the input, we need a result offset further along
  size_t subroffset=roffset+m_samplesize;
  const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);
  roffset=offset;
  switch (m_op)
  {
    case SYM:
	DataMaths::symmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
	break;
    case NSYM:
	DataMaths::nonsymmetric(*vleft,m_left->getShape(),subroffset, v, getShape(), offset);
	break;
    default:
	throw DataException("Programmer error - resolveNP1OUT can not resolve operator "+opToString(m_op)+".");
  }
  return &v;
}
873
/*
  \brief Compute the value of the expression (parameterised non-pointwise unary
  operation: TRACE or TRANS, using m_axis_offset) for the given sample.
  \return Vector which stores the value of the subexpression for the given sample.
  \param v A vector to store intermediate results.
  \param offset Index in v to begin storing results.
  \param sampleNo Sample number to evaluate.
  \param roffset (output parameter) the offset in the return vector where the result begins.

  The return value will be an existing vector so do not deallocate it.
  If the result is stored in v it should be stored at the offset given.
  Everything from offset to the end of v should be considered available for this method to use.
*/
DataTypes::ValueType*
DataLazy::resolveNP1OUT_P(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
	// we assume that any collapsing has been done before we get here
	// since we only have one argument we don't need to think about only
	// processing single points.
  if (m_readytype!='E')
  {
	throw DataException("Programmer error - resolveNP1OUT_P should only be called on expanded Data.");
  }
	// since we can't write the result over the input, we need a result offset further along
  size_t subroffset=roffset+m_samplesize;
  const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,subroffset);
  roffset=offset;
  switch (m_op)
  {
    case TRACE:
	DataMaths::trace(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
	break;
    case TRANS:
	DataMaths::transpose(*vleft,m_left->getShape(),subroffset, v,getShape(),offset,m_axis_offset);
	break;
    default:
	throw DataException("Programmer error - resolveNP1OUTP can not resolve operator "+opToString(m_op)+".");
  }
  return &v;
}
913
914
// Apply the pointwise binary functor X over 'steps' chunks of 'chunksize'
// doubles, advancing the left/right/result pointers by their respective
// strides after each chunk. A stride of 0 keeps a constant/tagged or scalar
// operand pinned in place while the other advances.
// NOTE(review): expects steps, chunksize, left, right, lroffset, rroffset,
// leftStep, rightStep, resultStep and resultp to be in scope at the expansion site.
#define PROC_OP(TYPE,X) \
	for (int i=0;i<steps;++i,resultp+=resultStep) \
	{ \
	   tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X); \
	   lroffset+=leftStep; \
	   rroffset+=rightStep; \
	}
922
/*
  \brief Compute the value of the expression (binary operation) for the given sample.
  \return Vector which stores the value of the subexpression for the given sample.
  \param v A vector to store intermediate results.
  \param offset Index in v to begin storing results.
  \param sampleNo Sample number to evaluate.
  \param roffset (output parameter) the offset in the return vector where the result begins.

  The return value will be an existing vector so do not deallocate it.
  If the result is stored in v it should be stored at the offset given.
  Everything from offset to the end of v should be considered available for this method to use.
*/
// This method assumes that any subexpressions which evaluate to Constant or Tagged Data
// have already been collapsed to IDENTITY. So we must have at least one expanded child.
// If both children are expanded, then we can process them in a single operation (we treat
// the whole sample as one big datapoint).
// If one of the children is not expanded, then we need to treat each point in the sample
// individually.
// There is an additional complication when scalar operations are considered.
// For example, 2+Vector.
// In this case each double within the point is treated individually.
DataTypes::ValueType*
DataLazy::resolveBinary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
{
cout << "Resolve binary: " << toString() << endl;	// NOTE(review): debug trace left in - runs once per sample

  size_t lroffset=0, rroffset=0;	// offsets in the left and right result vectors
	// first work out which of the children are expanded
  bool leftExp=(m_left->m_readytype=='E');
  bool rightExp=(m_right->m_readytype=='E');
  if (!leftExp && !rightExp)
  {
	throw DataException("Programmer Error - please use collapse if neither argument has type 'E'.");
  }
  bool leftScalar=(m_left->getRank()==0);
  bool rightScalar=(m_right->getRank()==0);
	// when both children are expanded the whole sample can be processed as one chunk
  bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));	// is processing in single step?
  int steps=(bigloops?1:getNumDPPSample());
  size_t chunksize=(bigloops? m_samplesize : getNoValues());	// if bigloops, pretend the whole sample is a datapoint
  if (m_left->getRank()!=m_right->getRank())	// need to deal with scalar * ? ops
  {
	if (!leftScalar && !rightScalar)
	{
	   throw DataException("resolveBinary - ranks of arguments must match unless one of them is scalar.");
	}
		// scalar case: one double at a time, one step per double of the larger operand
	steps=getNumDPPSample()*max(m_left->getNoValues(),m_right->getNoValues());
	chunksize=1;	// for scalar
  }
	// a step of 0 leaves a non-expanded (or scalar) operand fixed while the other advances
  int leftStep=((leftExp && (!rightExp || rightScalar))? m_right->getNoValues() : 0);
  int rightStep=((rightExp && (!leftExp || leftScalar))? m_left->getNoValues() : 0);
  int resultStep=max(leftStep,rightStep);	// only one (at most) should be !=0
	// Get the values of sub-expressions
  const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
  const ValueType* right=m_right->resolveSample(v,offset+m_samplesize,sampleNo,rroffset); // Note
	// the right child starts further along so the two operands cannot clobber each other.
  double* resultp=&(v[offset]);		// results are stored at the vector offset we received
  switch(m_op)
  {
    case ADD:
	PROC_OP(NO_ARG,plus<double>());
	break;
    case SUB:
	PROC_OP(NO_ARG,minus<double>());
	break;
    case MUL:
	PROC_OP(NO_ARG,multiplies<double>());
	break;
    case DIV:
	PROC_OP(NO_ARG,divides<double>());
	break;
    case POW:
	PROC_OP(double (double,double),::pow);
	break;
    default:
	throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
  }
  roffset=offset;
  return &v;
}
1002
1003
1004 /*
1005 \brief Compute the value of the expression (tensor product) for the given sample.
1006 \return Vector which stores the value of the subexpression for the given sample.
1007 \param v A vector to store intermediate results.
1008 \param offset Index in v to begin storing results.
1009 \param sampleNo Sample number to evaluate.
1010 \param roffset (output parameter) the offset in the return vector where the result begins.
1011
1012 The return value will be an existing vector so do not deallocate it.
1013 If the result is stored in v it should be stored at the offset given.
1014 Everything from offset to the end of v should be considered available for this method to use.
1015 */
1016 // This method assumes that any subexpressions which evaluate to Constant or Tagged Data
1017 // have already been collapsed to IDENTITY. So we must have at least one expanded child.
1018 // unlike the other resolve helpers, we must treat these datapoints separately.
1019 DataTypes::ValueType*
1020 DataLazy::resolveTProd(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
1021 {
1022 cout << "Resolve TensorProduct: " << toString() << endl;
1023
1024 size_t lroffset=0, rroffset=0; // offsets in the left and right result vectors
1025 // first work out which of the children are expanded
1026 bool leftExp=(m_left->m_readytype=='E');
1027 bool rightExp=(m_right->m_readytype=='E');
1028 int steps=getNumDPPSample();
1029 int leftStep=((leftExp && !rightExp)? m_right->getNoValues() : 0);
1030 int rightStep=((rightExp && !leftExp)? m_left->getNoValues() : 0);
1031 int resultStep=max(leftStep,rightStep); // only one (at most) should be !=0
1032 // Get the values of sub-expressions (leave a gap of one sample for the result).
1033 const ValueType* left=m_left->resolveSample(v,offset+m_samplesize,sampleNo,lroffset);
1034 const ValueType* right=m_right->resolveSample(v,offset+2*m_samplesize,sampleNo,rroffset);
1035 double* resultp=&(v[offset]); // results are stored at the vector offset we recieved
1036 switch(m_op)
1037 {
1038 case PROD:
1039 for (int i=0;i<steps;++i,resultp+=resultStep)
1040 {
1041 const double *ptr_0 = &((*left)[lroffset]);
1042 const double *ptr_1 = &((*right)[rroffset]);
1043 matrix_matrix_product(m_SL, m_SM, m_SR, ptr_0, ptr_1, resultp, m_transpose);
1044 lroffset+=leftStep;
1045 rroffset+=rightStep;
1046 }
1047 break;
1048 default:
1049 throw DataException("Programmer error - resolveTProduct can not resolve operator "+opToString(m_op)+".");
1050 }
1051 roffset=offset;
1052 return &v;
1053 }
1054
1055
1056
1057 /*
1058 \brief Compute the value of the expression for the given sample.
1059 \return Vector which stores the value of the subexpression for the given sample.
1060 \param v A vector to store intermediate results.
1061 \param offset Index in v to begin storing results.
1062 \param sampleNo Sample number to evaluate.
1063 \param roffset (output parameter) the offset in the return vector where the result begins.
1064
1065 The return value will be an existing vector so do not deallocate it.
1066 */
1067 // the vector and the offset are a place where the method could write its data if it wishes
1068 // it is not obligated to do so. For example, if it has its own storage already, it can use that.
1069 // Hence the return value to indicate where the data is actually stored.
1070 // Regardless, the storage should be assumed to be used, even if it isn't.
1071
1072 // the roffset is the offset within the returned vector where the data begins
1073 const DataTypes::ValueType*
1074 DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
1075 {
1076 cout << "Resolve sample " << toString() << endl;
1077 // collapse so we have a 'E' node or an IDENTITY for some other type
1078 if (m_readytype!='E' && m_op!=IDENTITY)
1079 {
1080 collapse();
1081 }
1082 if (m_op==IDENTITY)
1083 {
1084 const ValueType& vec=m_id->getVector();
1085 if (m_readytype=='C')
1086 {
1087 roffset=0;
1088 return &(vec);
1089 }
1090 roffset=m_id->getPointOffset(sampleNo, 0);
1091 return &(vec);
1092 }
1093 if (m_readytype!='E')
1094 {
1095 throw DataException("Programmer Error - Collapse did not produce an expanded node.");
1096 }
1097 switch (getOpgroup(m_op))
1098 {
1099 case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);
1100 case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
1101 case G_NP1OUT: return resolveNP1OUT(v, offset, sampleNo,roffset);
1102 case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset);
1103 case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset);
1104 default:
1105 throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
1106 }
1107 }
1108
1109
1110 // To simplify the memory management, all threads operate on one large vector, rather than one each.
1111 // Each sample is evaluated independently and copied into the result DataExpanded.
1112 DataReady_ptr
1113 DataLazy::resolve()
1114 {
1115
1116 cout << "Sample size=" << m_samplesize << endl;
1117 cout << "Buffers=" << m_buffsRequired << endl;
1118
1119 if (m_readytype!='E') // if the whole sub-expression is Constant or Tagged, then evaluate it normally
1120 {
1121 collapse();
1122 }
1123 if (m_op==IDENTITY) // So a lazy expression of Constant or Tagged data will be returned here.
1124 {
1125 return m_id;
1126 }
1127 // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
1128 size_t threadbuffersize=m_maxsamplesize*(max(1,m_buffsRequired)); // Each thread needs to have enough
1129 // storage to evaluate its expression
1130 int numthreads=1;
1131 #ifdef _OPENMP
1132 numthreads=getNumberOfThreads();
1133 #endif
1134 ValueType v(numthreads*threadbuffersize);
1135 cout << "Buffer created with size=" << v.size() << endl;
1136 DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(), ValueType(getNoValues()));
1137 ValueType& resvec=result->getVector();
1138 DataReady_ptr resptr=DataReady_ptr(result);
1139 int sample;
1140 size_t outoffset; // offset in the output data
1141 int totalsamples=getNumSamples();
1142 const ValueType* res=0; // Vector storing the answer
1143 size_t resoffset=0; // where in the vector to find the answer
1144 #pragma omp parallel for private(sample,resoffset,outoffset,res) schedule(static)
1145 for (sample=0;sample<totalsamples;++sample)
1146 {
1147 cout << "################################# " << sample << endl;
1148 #ifdef _OPENMP
1149 res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
1150 #else
1151 res=resolveSample(v,0,sample,resoffset); // res would normally be v, but not if its a single IDENTITY op.
1152 #endif
1153 cerr << "-------------------------------- " << endl;
1154 outoffset=result->getPointOffset(sample,0);
1155 cerr << "offset=" << outoffset << endl;
1156 for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset) // copy values into the output vector
1157 {
1158 resvec[outoffset]=(*res)[resoffset];
1159 }
1160 cerr << "*********************************" << endl;
1161 }
1162 return resptr;
1163 }
1164
1165 std::string
1166 DataLazy::toString() const
1167 {
1168 ostringstream oss;
1169 oss << "Lazy Data:";
1170 intoString(oss);
1171 return oss.str();
1172 }
1173
1174
1175 void
1176 DataLazy::intoString(ostringstream& oss) const
1177 {
1178 switch (getOpgroup(m_op))
1179 {
1180 case G_IDENTITY:
1181 if (m_id->isExpanded())
1182 {
1183 oss << "E";
1184 }
1185 else if (m_id->isTagged())
1186 {
1187 oss << "T";
1188 }
1189 else if (m_id->isConstant())
1190 {
1191 oss << "C";
1192 }
1193 else
1194 {
1195 oss << "?";
1196 }
1197 oss << '@' << m_id.get();
1198 break;
1199 case G_BINARY:
1200 oss << '(';
1201 m_left->intoString(oss);
1202 oss << ' ' << opToString(m_op) << ' ';
1203 m_right->intoString(oss);
1204 oss << ')';
1205 break;
1206 case G_UNARY:
1207 case G_NP1OUT:
1208 case G_NP1OUT_P:
1209 oss << opToString(m_op) << '(';
1210 m_left->intoString(oss);
1211 oss << ')';
1212 break;
1213 case G_TENSORPROD:
1214 oss << opToString(m_op) << '(';
1215 m_left->intoString(oss);
1216 oss << ", ";
1217 m_right->intoString(oss);
1218 oss << ')';
1219 break;
1220 default:
1221 oss << "UNKNOWN";
1222 }
1223 }
1224
1225 DataAbstract*
1226 DataLazy::deepCopy()
1227 {
1228 switch (getOpgroup(m_op))
1229 {
1230 case G_IDENTITY: return new DataLazy(m_id->deepCopy()->getPtr());
1231 case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op);
1232 case G_BINARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op);
1233 case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op);
1234 case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose);
1235 default:
1236 throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+".");
1237 }
1238 }
1239
1240
// There is no single, natural interpretation of getLength on DataLazy.
// Instances of DataReady can look at the size of their vectors.
// For lazy though, it could be the size the data would be if it were resolved;
// or it could be some function of the lengths of the DataReady instances which
// form part of the expression.
// Rather than have people making assumptions, I have disabled the method.
//
// \brief Deliberately unsupported - always throws DataException.
DataTypes::ValueType::size_type
DataLazy::getLength() const
{
	throw DataException("getLength() does not make sense for lazy data.");
}
1252
1253
// \brief Slicing is not supported on lazy expressions - always throws.
// Resolve the expression first and slice the resulting DataReady instead.
// \param region unused (the method throws unconditionally).
DataAbstract*
DataLazy::getSlice(const DataTypes::RegionType& region) const
{
	throw DataException("getSlice - not implemented for Lazy objects.");
}
1259
1260
1261 // To do this we need to rely on our child nodes
1262 DataTypes::ValueType::size_type
1263 DataLazy::getPointOffset(int sampleNo,
1264 int dataPointNo)
1265 {
1266 if (m_op==IDENTITY)
1267 {
1268 return m_id->getPointOffset(sampleNo,dataPointNo);
1269 }
1270 if (m_readytype!='E')
1271 {
1272 collapse();
1273 return m_id->getPointOffset(sampleNo,dataPointNo);
1274 }
1275 // at this point we do not have an identity node and the expression will be Expanded
1276 // so we only need to know which child to ask
1277 if (m_left->m_readytype=='E')
1278 {
1279 return m_left->getPointOffset(sampleNo,dataPointNo);
1280 }
1281 else
1282 {
1283 return m_right->getPointOffset(sampleNo,dataPointNo);
1284 }
1285 }
1286
1287 // To do this we need to rely on our child nodes
1288 DataTypes::ValueType::size_type
1289 DataLazy::getPointOffset(int sampleNo,
1290 int dataPointNo) const
1291 {
1292 if (m_op==IDENTITY)
1293 {
1294 return m_id->getPointOffset(sampleNo,dataPointNo);
1295 }
1296 if (m_readytype=='E')
1297 {
1298 // at this point we do not have an identity node and the expression will be Expanded
1299 // so we only need to know which child to ask
1300 if (m_left->m_readytype=='E')
1301 {
1302 return m_left->getPointOffset(sampleNo,dataPointNo);
1303 }
1304 else
1305 {
1306 return m_right->getPointOffset(sampleNo,dataPointNo);
1307 }
1308 }
1309 if (m_readytype=='C')
1310 {
1311 return m_left->getPointOffset(sampleNo,dataPointNo); // which child doesn't matter
1312 }
1313 throw DataException("Programmer error - getPointOffset on lazy data may require collapsing (but this object is marked const).");
1314 }
1315
1316 // It would seem that DataTagged will need to be treated differently since even after setting all tags
1317 // to zero, all the tags from all the DataTags would be in the result.
1318 // However since they all have the same value (0) whether they are there or not should not matter.
1319 // So I have decided that for all types this method will create a constant 0.
1320 // It can be promoted up as required.
1321 // A possible efficiency concern might be expanded->constant->expanded which has an extra memory management
1322 // but we can deal with that if it arrises.
1323 void
1324 DataLazy::setToZero()
1325 {
1326 DataTypes::ValueType v(getNoValues(),0);
1327 m_id=DataReady_ptr(new DataConstant(getFunctionSpace(),getShape(),v));
1328 m_op=IDENTITY;
1329 m_right.reset();
1330 m_left.reset();
1331 m_readytype='C';
1332 m_buffsRequired=1;
1333 }
1334
1335 } // end namespace

  ViewVC Help
Powered by ViewVC 1.1.26