
Contents of /trunk/escript/src/Data.cpp



Revision 2260
Tue Feb 10 04:50:10 2009 UTC by jfenwick
File size: 86548 bytes
That should be the last of the copyWithMask related errors.


1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41
42 using namespace std;
43 using namespace boost::python;
44 using namespace boost;
45 using namespace escript;
46
47 // ensure the current object is not a DataLazy
48 // The idea was that we could add an optional warning whenever a resolve is forced
49 #define FORCERESOLVE if (isLazy()) {resolve();}
50 #define AUTOLAZYON escriptParams.getAUTOLAZY()
51 #define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
52 {\
53 DataLazy* c=new DataLazy(borrowDataPtr(),X);\
54 return Data(c);\
55 }
56 #define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
57 {\
58 DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
59 return Data(c);\
60 }
61
62 #define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
63 {\
64 DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
65 m_data=c->getPtr();\
66 return (*this);\
67 }
68
69 // like the above but returns a new data rather than *this
70 #define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
71 {\
72 DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
73 return Data(c);\
74 }
75
76 #define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
77 {\
78 DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
79 return Data(c);\
80 }
81
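// Illustrative usage sketch (not part of the original source): the unary
// operations further down use these macros roughly like this, where OPCODE
// stands for one of the DataLazy operation tags (e.g. SIN) and someOp /
// someFunction are hypothetical names:
//
//   Data
//   Data::someOp() const
//   {
//     MAKELAZYOP(OPCODE)   // return a lazily-evaluated Data if lazy/AUTOLAZY applies
//     return C_TensorUnaryOperation<double (*)(double)>(*this, ::someFunction);
//   }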
82 Data::Data()
83 {
84 //
85 // Default data is type DataEmpty
86 DataAbstract* temp=new DataEmpty();
87 m_data=temp->getPtr();
88 m_protected=false;
89 }
90
91 Data::Data(double value,
92 const tuple& shape,
93 const FunctionSpace& what,
94 bool expanded)
95 {
96 DataTypes::ShapeType dataPointShape;
97 for (int i = 0; i < shape.attr("__len__")(); ++i) {
98 dataPointShape.push_back(extract<const int>(shape[i]));
99 }
100
101 int len = DataTypes::noValues(dataPointShape);
102 DataVector temp_data(len,value,len);
103 initialise(temp_data, dataPointShape, what, expanded);
104 m_protected=false;
105 }
106
107 Data::Data(double value,
108 const DataTypes::ShapeType& dataPointShape,
109 const FunctionSpace& what,
110 bool expanded)
111 {
112 int len = DataTypes::noValues(dataPointShape);
113
114 DataVector temp_data(len,value,len);
115 // DataArrayView temp_dataView(temp_data, dataPointShape);
116
117 // initialise(temp_dataView, what, expanded);
118 initialise(temp_data, dataPointShape, what, expanded);
119
120 m_protected=false;
121 }
122
123 Data::Data(const Data& inData)
124 {
125 m_data=inData.m_data;
126 m_protected=inData.isProtected();
127 }
128
129
130 Data::Data(const Data& inData,
131 const DataTypes::RegionType& region)
132 {
133 DataAbstract_ptr dat=inData.m_data;
134 if (inData.isLazy())
135 {
136 dat=inData.m_data->resolve();
137 }
138 else
139 {
140 dat=inData.m_data;
141 }
142 //
143 // Create Data which is a slice of another Data
144 DataAbstract* tmp = dat->getSlice(region);
145 m_data=DataAbstract_ptr(tmp);
146 m_protected=false;
147 }
148
149 Data::Data(const Data& inData,
150 const FunctionSpace& functionspace)
151 {
152 if (inData.isEmpty())
153 {
154 throw DataException("Error - will not interpolate for instances of DataEmpty.");
155 }
156 if (inData.getFunctionSpace()==functionspace) {
157 m_data=inData.m_data;
158 }
159 else
160 {
161
162 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
163 if (!inData.probeInterpolation(functionspace))
164 { // Even though this is constant, we still need to check whether interpolation is allowed
165 throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
166 }
167 // if the data is not lazy, this will just be a cast to DataReady
168 DataReady_ptr dr=inData.m_data->resolve();
169 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
170 m_data=DataAbstract_ptr(dc);
171 } else {
172 Data tmp(0,inData.getDataPointShape(),functionspace,true);
173 // Note: Must use a reference or pointer to a derived object
174 // in order to get polymorphic behaviour. Shouldn't really
175 // be able to create an instance of AbstractDomain but that was done
176 // as a boost:python work around which may no longer be required.
177 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
178 const_Domain_ptr inDataDomain=inData.getDomain();
179 if (inDataDomain==functionspace.getDomain()) {
180 inDataDomain->interpolateOnDomain(tmp,inData);
181 } else {
182 inDataDomain->interpolateACross(tmp,inData);
183 }
184 m_data=tmp.m_data;
185 }
186 }
187 m_protected=false;
188 }
189
190 Data::Data(DataAbstract* underlyingdata)
191 {
192 // m_data=shared_ptr<DataAbstract>(underlyingdata);
193 m_data=underlyingdata->getPtr();
194 m_protected=false;
195 }
196
197 Data::Data(DataAbstract_ptr underlyingdata)
198 {
199 m_data=underlyingdata;
200 m_protected=false;
201 }
202
203
204 Data::Data(const numeric::array& value,
205 const FunctionSpace& what,
206 bool expanded)
207 {
208 initialise(value,what,expanded);
209 m_protected=false;
210 }
211
212 Data::Data(const DataTypes::ValueType& value,
213 const DataTypes::ShapeType& shape,
214 const FunctionSpace& what,
215 bool expanded)
216 {
217 initialise(value,shape,what,expanded);
218 m_protected=false;
219 }
220
221
222 Data::Data(const object& value,
223 const FunctionSpace& what,
224 bool expanded)
225 {
226 numeric::array asNumArray(value);
227 initialise(asNumArray,what,expanded);
228 m_protected=false;
229 }
230
231
232 Data::Data(const object& value,
233 const Data& other)
234 {
235 numeric::array asNumArray(value);
236
237 // extract the shape of the numarray
238 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
239 if (DataTypes::getRank(tempShape)==0) {
240
241
242 // get the space for the data vector
243 int len1 = DataTypes::noValues(tempShape);
244 DataVector temp_data(len1, 0.0, len1);
245 temp_data.copyFromNumArray(asNumArray,1);
246
247 int len = DataTypes::noValues(other.getDataPointShape());
248
249 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
250 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
251 m_data=DataAbstract_ptr(t);
252
253 } else {
254 //
255 // Create a DataConstant with the same sample shape as other
256 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
257 // boost::shared_ptr<DataAbstract> sp(t);
258 // m_data=sp;
259 m_data=DataAbstract_ptr(t);
260 }
261 m_protected=false;
262 }
263
264 Data::~Data()
265 {
266
267 }
268
269
270
271 void
272 Data::initialise(const boost::python::numeric::array& value,
273 const FunctionSpace& what,
274 bool expanded)
275 {
276 //
277 // Construct a Data object of the appropriate type.
278 // Construct the object first as there seems to be a bug which causes
279 // undefined behaviour if an exception is thrown during construction
280 // within the shared_ptr constructor.
281 if (expanded) {
282 DataAbstract* temp=new DataExpanded(value, what);
283 // boost::shared_ptr<DataAbstract> temp_data(temp);
284 // m_data=temp_data;
285 m_data=temp->getPtr();
286 } else {
287 DataAbstract* temp=new DataConstant(value, what);
288 // boost::shared_ptr<DataAbstract> temp_data(temp);
289 // m_data=temp_data;
290 m_data=temp->getPtr();
291 }
292 }
293
294
295 void
296 Data::initialise(const DataTypes::ValueType& value,
297 const DataTypes::ShapeType& shape,
298 const FunctionSpace& what,
299 bool expanded)
300 {
301 //
302 // Construct a Data object of the appropriate type.
303 // Construct the object first as there seems to be a bug which causes
304 // undefined behaviour if an exception is thrown during construction
305 // within the shared_ptr constructor.
306 if (expanded) {
307 DataAbstract* temp=new DataExpanded(what, shape, value);
308 // boost::shared_ptr<DataAbstract> temp_data(temp);
309 // m_data=temp_data;
310 m_data=temp->getPtr();
311 } else {
312 DataAbstract* temp=new DataConstant(what, shape, value);
313 // boost::shared_ptr<DataAbstract> temp_data(temp);
314 // m_data=temp_data;
315 m_data=temp->getPtr();
316 }
317 }
318
319
320 // void
321 // Data::CompareDebug(const Data& rd)
322 // {
323 // using namespace std;
324 // bool mismatch=false;
325 // std::cout << "Comparing left and right" << endl;
326 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
327 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
328 //
329 // if (left==0)
330 // {
331 // cout << "left arg is not a DataTagged\n";
332 // return;
333 // }
334 //
335 // if (right==0)
336 // {
337 // cout << "right arg is not a DataTagged\n";
338 // return;
339 // }
340 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
341 // cout << "Shapes ";
342 // if (left->getShape()==right->getShape())
343 // {
344 // cout << "ok\n";
345 // }
346 // else
347 // {
348 // cout << "Problem: shapes do not match\n";
349 // mismatch=true;
350 // }
351 // int lim=left->getVector().size();
352 // if (right->getVector().size()) lim=right->getVector().size();
353 // for (int i=0;i<lim;++i)
354 // {
355 // if (left->getVector()[i]!=right->getVector()[i])
356 // {
357 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
358 // mismatch=true;
359 // }
360 // }
361 //
362 // // still need to check the tag map
363 // // also need to watch what is happening to function spaces, are they copied or what?
364 //
365 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
366 // const DataTagged::DataMapType& mapright=right->getTagLookup();
367 //
368 // if (mapleft.size()!=mapright.size())
369 // {
370 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
371 // mismatch=true;
372 // cout << "Left map\n";
373 // DataTagged::DataMapType::const_iterator i,j;
374 // for (i=mapleft.begin();i!=mapleft.end();++i) {
375 // cout << "(" << i->first << "=>" << i->second << ")\n";
376 // }
377 // cout << "Right map\n";
378 // for (i=mapright.begin();i!=mapright.end();++i) {
379 // cout << "(" << i->first << "=>" << i->second << ")\n";
380 // }
381 // cout << "End map\n";
382 //
383 // }
384 //
385 // DataTagged::DataMapType::const_iterator i,j;
386 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
387 // if ((i->first!=j->first) || (i->second!=j->second))
388 // {
389 // cout << "(" << i->first << "=>" << i->second << ")";
390 // cout << ":(" << j->first << "=>" << j->second << ") ";
391 // mismatch=true;
392 // }
393 // }
394 // if (mismatch)
395 // {
396 // cout << "#Mismatch\n";
397 // }
398 // }
399
400 escriptDataC
401 Data::getDataC()
402 {
403 escriptDataC temp;
404 temp.m_dataPtr=(void*)this;
405 return temp;
406 }
407
408 escriptDataC
409 Data::getDataC() const
410 {
411 escriptDataC temp;
412 temp.m_dataPtr=(void*)this;
413 return temp;
414 }
415
416 const boost::python::tuple
417 Data::getShapeTuple() const
418 {
419 const DataTypes::ShapeType& shape=getDataPointShape();
420 switch(getDataPointRank()) {
421 case 0:
422 return make_tuple();
423 case 1:
424 return make_tuple(long_(shape[0]));
425 case 2:
426 return make_tuple(long_(shape[0]),long_(shape[1]));
427 case 3:
428 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
429 case 4:
430 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
431 default:
432 throw DataException("Error - illegal Data rank.");
433 }
434 }
435
436
437 // The different name is needed because boost has trouble with overloaded functions.
438 // It can't work out what type the function is based solely on its name.
439 // There are ways to fix this involving creating function pointer variables for each form,
440 // but there doesn't seem to be a need, given that the methods have the same name from the python point of view.
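// Illustrative sketch only (not used here): the function-pointer approach
// mentioned above would disambiguate overloads at the boost::python binding
// with an explicit member-function-pointer, e.g. (hypothetical binding code):
//   Data (Data::*copy_fn)() = &Data::copySelf;
//   // ... .def("copy", copy_fn) in the module definition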
441 Data
442 Data::copySelf()
443 {
444 DataAbstract* temp=m_data->deepCopy();
445 return Data(temp);
446 }
447
448 void
449 Data::copy(const Data& other)
450 {
451 DataAbstract* temp=other.m_data->deepCopy();
452 DataAbstract_ptr p=temp->getPtr();
453 m_data=p;
454 }
455
456
457 Data
458 Data::delay()
459 {
460 DataLazy* dl=new DataLazy(m_data);
461 return Data(dl);
462 }
463
464 void
465 Data::delaySelf()
466 {
467 if (!isLazy())
468 {
469 m_data=(new DataLazy(m_data))->getPtr();
470 }
471 }
472
473 void
474 Data::setToZero()
475 {
476 if (isEmpty())
477 {
478 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
479 }
480 m_data->setToZero();
481 }
482
483 void
484 Data::copyWithMask(const Data& other,
485 const Data& mask)
486 {
487 // 1. Interpolate if required so all Datas use the same FS as this
488 // 2. Tag or Expand so that all Datas are the same type
489 // 3. Iterate over the data vectors copying values where mask is >0
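// Conceptually (illustrative sketch of the element-wise rule applied below):
//   for each value index i: if (mask[i] > 0) this[i] = other[i];
// with the tagged case applying the same rule per tag, including the defaults.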
490 if (other.isEmpty() || mask.isEmpty())
491 {
492 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
493 }
494 Data other2(other);
495 Data mask2(mask);
496 other2.resolve();
497 mask2.resolve();
498 this->resolve();
499 FunctionSpace myFS=getFunctionSpace();
500 FunctionSpace oFS=other2.getFunctionSpace();
501 FunctionSpace mFS=mask2.getFunctionSpace();
502 if (oFS!=myFS)
503 {
504 if (other2.probeInterpolation(myFS))
505 {
506 other2=other2.interpolate(myFS);
507 }
508 else
509 {
510 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
511 }
512 }
513 if (mFS!=myFS)
514 {
515 if (mask2.probeInterpolation(myFS))
516 {
517 mask2=mask2.interpolate(myFS);
518 }
519 else
520 {
521 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
522 }
523 }
524 // Ensure that all args have the same type
525 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
526 {
527 this->expand();
528 other2.expand();
529 mask2.expand();
530 }
531 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
532 {
533 this->tag();
534 other2.tag();
535 mask2.tag();
536 }
537 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
538 {
539 }
540 else
541 {
542 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
543 }
544 unsigned int selfrank=getDataPointRank();
545 unsigned int otherrank=other2.getDataPointRank();
546 unsigned int maskrank=mask2.getDataPointRank();
547 if ((selfrank==0) && (otherrank>0 || maskrank>0))
548 {
549 // to get here we must be copying from a large object into a scalar
550 // I am not allowing this.
551 // If you are calling copyWithMask then you are considering keeping some existing values
552 // and so I'm going to assume that you don't want your data objects getting a new shape.
553 throw DataException("Attempt to copyWithMask into a scalar from an object or mask with rank>0.");
554 }
555 // Now we iterate over the elements
556 DataVector& self=getReadyPtr()->getVector();
557 const DataVector& ovec=other2.getReadyPtr()->getVector();
558 const DataVector& mvec=mask2.getReadyPtr()->getVector();
559
560 if ((selfrank>0) && (otherrank==0) &&(maskrank==0))
561 {
562 // Not allowing this combination.
563 // it is not clear what the rank of the target should be.
564 // Should it be filled with the scalar (rank stays the same),
565 // or should the target object be reshaped to be a scalar as well?
566 throw DataException("Attempt to copyWithMask from scalar mask and data into non-scalar target.");
567 }
568 if ((selfrank>0) && (otherrank>0) &&(maskrank==0))
569 {
570 if (mvec[0]>0) // copy whole object if scalar is >0
571 {
572 copy(other);
573 }
574 return;
575 }
576 if (isTagged()) // so all objects involved will also be tagged
577 {
578 // note the !
579 if (!((getDataPointShape()==mask2.getDataPointShape()) &&
580 ((other2.getDataPointShape()==mask2.getDataPointShape()) || (otherrank==0))))
581 {
582 throw DataException("copyWithMask, shape mismatch.");
583 }
584
585 // We need to consider the possibility that tags are missing or in the wrong order
586 // My guiding assumption here is: All tagged Datas are assumed to have the default value for
587 // all tags which are not explicitly defined
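// Illustrative example of that assumption: if mask2 defines tags {1,2} but
// *this only defines tag 1, the addTag calls below give *this an entry for
// tag 2 (taken from its default value), so every tag can be copied uniformly.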
588
589 const DataTagged* mptr=dynamic_cast<const DataTagged*>(mask2.m_data.get());
590 const DataTagged* optr=dynamic_cast<const DataTagged*>(other2.m_data.get());
591 DataTagged* tptr=dynamic_cast<DataTagged*>(m_data.get());
592
593 // first, add any tags missing from other or mask
594 const DataTagged::DataMapType& olookup=optr->getTagLookup();
595 const DataTagged::DataMapType& mlookup=mptr->getTagLookup();
596 const DataTagged::DataMapType& tlookup=tptr->getTagLookup();
597 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
598 for (i=olookup.begin();i!=olookup.end();i++)
599 {
600 tptr->addTag(i->first);
601 }
602 for (i=mlookup.begin();i!=mlookup.end();i++) {
603 tptr->addTag(i->first);
604 }
605 // now we know that *this has all the required tags but they aren't guaranteed to be in
606 // the same order
607
608 // There are two possibilities: 1. all objects have the same rank. 2. other is a scalar
609 if ((selfrank==otherrank) && (otherrank==maskrank))
610 {
611 for (i=tlookup.begin();i!=tlookup.end();i++)
612 {
613 // get the target offset
614 DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
615 DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
616 DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
617 for (int j=0;j<getDataPointSize();++j)
618 {
619 if (mvec[j+moff]>0)
620 {
621 self[j+toff]=ovec[j+ooff];
622 }
623 }
624 }
625 // now for the default value
626 for (int j=0;j<getDataPointSize();++j)
627 {
628 if (mvec[j+mptr->getDefaultOffset()]>0)
629 {
630 self[j+tptr->getDefaultOffset()]=ovec[j+optr->getDefaultOffset()];
631 }
632 }
633 }
634 else // other is a scalar
635 {
636 for (i=tlookup.begin();i!=tlookup.end();i++)
637 {
638 // get the target offset
639 DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
640 DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
641 DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
642 for (int j=0;j<getDataPointSize();++j)
643 {
644 if (mvec[j+moff]>0)
645 {
646 self[j+toff]=ovec[ooff];
647 }
648 }
649 }
650 // now for the default value
651 for (int j=0;j<getDataPointSize();++j)
652 {
653 if (mvec[j+mptr->getDefaultOffset()]>0)
654 {
655 self[j+tptr->getDefaultOffset()]=ovec[0];
656 }
657 }
658 }
659
660 return; // ugly
661 }
662 // mixed scalar and non-scalar operation
663 if ((selfrank>0) && (otherrank==0) && (mask2.getDataPointShape()==getDataPointShape()))
664 {
665 size_t num_points=self.size();
666 // OPENMP 3.0 allows unsigned loop vars.
667 #if defined(_OPENMP) && (_OPENMP < 200805)
668 long i;
669 #else
670 size_t i;
671 #endif
672 size_t psize=getDataPointSize();
673 #pragma omp parallel for private(i) schedule(static)
674 for (i=0;i<num_points;++i)
675 {
676 if (mvec[i]>0)
677 {
678 self[i]=ovec[i/psize]; // since this is expanded there is one scalar per dest point
679 }
680 }
681 return; // ugly!
682 }
683 // tagged data is already taken care of so we only need to worry about shapes
684 // special cases with scalars are already dealt with so all we need to worry about is shape
685 if ((getDataPointShape()!=other2.getDataPointShape()) || getDataPointShape()!=mask2.getDataPointShape())
686 {
687 ostringstream oss;
688 oss <<"Error - size mismatch in arguments to copyWithMask.";
689 oss << "\nself_shape=" << DataTypes::shapeToString(getDataPointShape());
690 oss << " other2_shape=" << DataTypes::shapeToString(other2.getDataPointShape());
691 oss << " mask2_shape=" << DataTypes::shapeToString(mask2.getDataPointShape());
692 throw DataException(oss.str());
693 }
694 size_t num_points=self.size();
695
696 // OPENMP 3.0 allows unsigned loop vars.
697 #if defined(_OPENMP) && (_OPENMP < 200805)
698 long i;
699 #else
700 size_t i;
701 #endif
702 #pragma omp parallel for private(i) schedule(static)
703 for (i=0;i<num_points;++i)
704 {
705 if (mvec[i]>0)
706 {
707 self[i]=ovec[i];
708 }
709 }
710 }
711
712
713
714 bool
715 Data::isExpanded() const
716 {
717 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
718 return (temp!=0);
719 }
720
721 bool
722 Data::isTagged() const
723 {
724 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
725 return (temp!=0);
726 }
727
728 bool
729 Data::isEmpty() const
730 {
731 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
732 return (temp!=0);
733 }
734
735 bool
736 Data::isConstant() const
737 {
738 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
739 return (temp!=0);
740 }
741
742 bool
743 Data::isLazy() const
744 {
745 return m_data->isLazy();
746 }
747
748 // at the moment this is synonymous with !isLazy() but that could change
749 bool
750 Data::isReady() const
751 {
752 return (dynamic_cast<DataReady*>(m_data.get())!=0);
753 }
754
755
756 void
757 Data::setProtection()
758 {
759 m_protected=true;
760 }
761
762 bool
763 Data::isProtected() const
764 {
765 return m_protected;
766 }
767
768
769
770 void
771 Data::expand()
772 {
773 if (isConstant()) {
774 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
775 DataAbstract* temp=new DataExpanded(*tempDataConst);
776 // shared_ptr<DataAbstract> temp_data(temp);
777 // m_data=temp_data;
778 m_data=temp->getPtr();
779 } else if (isTagged()) {
780 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
781 DataAbstract* temp=new DataExpanded(*tempDataTag);
782 // shared_ptr<DataAbstract> temp_data(temp);
783 // m_data=temp_data;
784 m_data=temp->getPtr();
785 } else if (isExpanded()) {
786 //
787 // do nothing
788 } else if (isEmpty()) {
789 throw DataException("Error - Expansion of DataEmpty not possible.");
790 } else if (isLazy()) {
791 resolve();
792 expand(); // resolve might not give us expanded data
793 } else {
794 throw DataException("Error - Expansion not implemented for this Data type.");
795 }
796 }
797
798 void
799 Data::tag()
800 {
801 if (isConstant()) {
802 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
803 DataAbstract* temp=new DataTagged(*tempDataConst);
804 // shared_ptr<DataAbstract> temp_data(temp);
805 // m_data=temp_data;
806 m_data=temp->getPtr();
807 } else if (isTagged()) {
808 // do nothing
809 } else if (isExpanded()) {
810 throw DataException("Error - Creating tag data from DataExpanded not possible.");
811 } else if (isEmpty()) {
812 throw DataException("Error - Creating tag data from DataEmpty not possible.");
813 } else if (isLazy()) {
814 DataAbstract_ptr res=m_data->resolve();
815 if (m_data->isExpanded())
816 {
817 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
818 }
819 m_data=res;
820 tag();
821 } else {
822 throw DataException("Error - Tagging not implemented for this Data type.");
823 }
824 }
825
826 void
827 Data::resolve()
828 {
829 if (isLazy())
830 {
831 m_data=m_data->resolve();
832 }
833 }
834
835
836 Data
837 Data::oneOver() const
838 {
839 MAKELAZYOP(RECIP)
840 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
841 }
842
843 Data
844 Data::wherePositive() const
845 {
846 MAKELAZYOP(GZ)
847 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
848 }
849
850 Data
851 Data::whereNegative() const
852 {
853 MAKELAZYOP(LZ)
854 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
855 }
856
857 Data
858 Data::whereNonNegative() const
859 {
860 MAKELAZYOP(GEZ)
861 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
862 }
863
864 Data
865 Data::whereNonPositive() const
866 {
867 MAKELAZYOP(LEZ)
868 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
869 }
870
871 Data
872 Data::whereZero(double tol) const
873 {
874 // Data dataAbs=abs();
875 // return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
876 MAKELAZYOPOFF(EZ,tol)
877 return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
878
879 }
880
881 Data
882 Data::whereNonZero(double tol) const
883 {
884 // Data dataAbs=abs();
885 // return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
886 MAKELAZYOPOFF(NEZ,tol)
887 return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
888
889 }
890
891 Data
892 Data::interpolate(const FunctionSpace& functionspace) const
893 {
894 return Data(*this,functionspace);
895 }
896
897 bool
898 Data::probeInterpolation(const FunctionSpace& functionspace) const
899 {
900 return getFunctionSpace().probeInterpolation(functionspace);
901 }
902
903 Data
904 Data::gradOn(const FunctionSpace& functionspace) const
905 {
906 if (isEmpty())
907 {
908 throw DataException("Error - operation not permitted on instances of DataEmpty.");
909 }
910 double blocktimer_start = blocktimer_time();
911 if (functionspace.getDomain()!=getDomain())
912 throw DataException("Error - gradient cannot be calculated on different domains.");
913 DataTypes::ShapeType grad_shape=getDataPointShape();
914 grad_shape.push_back(functionspace.getDim());
915 Data out(0.0,grad_shape,functionspace,true);
916 getDomain()->setToGradient(out,*this);
917 blocktimer_increment("grad()", blocktimer_start);
918 return out;
919 }
920
921 Data
922 Data::grad() const
923 {
924 if (isEmpty())
925 {
926 throw DataException("Error - operation not permitted on instances of DataEmpty.");
927 }
928 return gradOn(escript::function(*getDomain()));
929 }
930
931 int
932 Data::getDataPointSize() const
933 {
934 return m_data->getNoValues();
935 }
936
937 DataTypes::ValueType::size_type
938 Data::getLength() const
939 {
940 return m_data->getLength();
941 }
942
943 const
944 boost::python::numeric::array
945 Data:: getValueOfDataPoint(int dataPointNo)
946 {
947 int i, j, k, l;
948
949 FORCERESOLVE;
950
951 //
952 // determine the rank and shape of each data point
953 int dataPointRank = getDataPointRank();
954 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
955
956 //
957 // create the numeric array to be returned
958 boost::python::numeric::array numArray(0.0);
959
960 //
961 // the shape of the returned numeric array will be the same
962 // as that of the data point
963 int arrayRank = dataPointRank;
964 const DataTypes::ShapeType& arrayShape = dataPointShape;
965
966 //
967 // resize the numeric array to the shape just calculated
968 if (arrayRank==0) {
969 numArray.resize(1);
970 }
971 if (arrayRank==1) {
972 numArray.resize(arrayShape[0]);
973 }
974 if (arrayRank==2) {
975 numArray.resize(arrayShape[0],arrayShape[1]);
976 }
977 if (arrayRank==3) {
978 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
979 }
980 if (arrayRank==4) {
981 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
982 }
983
984 if (getNumDataPointsPerSample()>0) {
985 int sampleNo = dataPointNo/getNumDataPointsPerSample();
986 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
987 //
988 // Check a valid sample number has been supplied
989 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
990 throw DataException("Error - Data::getValueOfDataPoint: invalid sampleNo.");
991 }
992
993 //
994 // Check a valid data point number has been supplied
995 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
996 throw DataException("Error - Data::getValueOfDataPoint: invalid dataPointNoInSample.");
997 }
998 // TODO: global error handling
999 // create a view of the data if it is stored locally
1000 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1001 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1002
1003
1004 switch( dataPointRank ){
1005 case 0 :
1006 numArray[0] = getDataAtOffset(offset);
1007 break;
1008 case 1 :
1009 for( i=0; i<dataPointShape[0]; i++ )
1010 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1011 break;
1012 case 2 :
1013 for( i=0; i<dataPointShape[0]; i++ )
1014 for( j=0; j<dataPointShape[1]; j++)
1015 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1016 break;
1017 case 3 :
1018 for( i=0; i<dataPointShape[0]; i++ )
1019 for( j=0; j<dataPointShape[1]; j++ )
1020 for( k=0; k<dataPointShape[2]; k++)
1021 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1022 break;
1023 case 4 :
1024 for( i=0; i<dataPointShape[0]; i++ )
1025 for( j=0; j<dataPointShape[1]; j++ )
1026 for( k=0; k<dataPointShape[2]; k++ )
1027 for( l=0; l<dataPointShape[3]; l++)
1028 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1029 break;
1030 }
1031 }
1032 //
1033 // return the array
1034 return numArray;
1035 }
1036
1037 void
1038 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
1039 {
1040 // this will throw if the value cannot be represented
1041 boost::python::numeric::array num_array(py_object);
1042 setValueOfDataPointToArray(dataPointNo,num_array);
1043 }
1044
1045 void
1046 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
1047 {
1048 if (isProtected()) {
1049 throw DataException("Error - attempt to update protected Data object.");
1050 }
1051 FORCERESOLVE;
1052 //
1053 // check rank
1054 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
1055 throw DataException("Rank of numarray does not match Data object rank");
1056
1057 //
1058 // check shape of num_array
1059 for (unsigned int i=0; i<getDataPointRank(); i++) {
1060 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
1061 throw DataException("Shape of numarray does not match Data object shape");
1062 }
1063 //
1064 // make sure data is expanded:
1065 //
1066 if (!isExpanded()) {
1067 expand();
1068 }
1069 if (getNumDataPointsPerSample()>0) {
1070 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1071 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1072 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
1073 } else {
1074 m_data->copyToDataPoint(-1, 0,num_array);
1075 }
1076 }
1077
1078 void
1079 Data::setValueOfDataPoint(int dataPointNo, const double value)
1080 {
1081 if (isProtected()) {
1082 throw DataException("Error - attempt to update protected Data object.");
1083 }
1084 //
1085 // make sure data is expanded:
1086 FORCERESOLVE;
1087 if (!isExpanded()) {
1088 expand();
1089 }
1090 if (getNumDataPointsPerSample()>0) {
1091 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1092 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1093 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
1094 } else {
1095 m_data->copyToDataPoint(-1, 0,value);
1096 }
1097 }
1098
1099 const
1100 boost::python::numeric::array
1101 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
1102 {
1103 size_t length=0;
1104 int i, j, k, l, pos;
1105 FORCERESOLVE;
1106 //
1107 // determine the rank and shape of each data point
1108 int dataPointRank = getDataPointRank();
1109 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
1110
1111 //
1112 // create the numeric array to be returned
1113 boost::python::numeric::array numArray(0.0);
1114
1115 //
1116 // the shape of the returned numeric array will be the same
1117 // as that of the data point
1118 int arrayRank = dataPointRank;
1119 const DataTypes::ShapeType& arrayShape = dataPointShape;
1120
1121 //
1122 // resize the numeric array to the shape just calculated
1123 if (arrayRank==0) {
1124 numArray.resize(1);
1125 }
1126 if (arrayRank==1) {
1127 numArray.resize(arrayShape[0]);
1128 }
1129 if (arrayRank==2) {
1130 numArray.resize(arrayShape[0],arrayShape[1]);
1131 }
1132 if (arrayRank==3) {
1133 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
1134 }
1135 if (arrayRank==4) {
1136 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
1137 }
1138
1139 // added for the MPI communication
1140 length=1;
1141 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1142 double *tmpData = new double[length];
1143
1144 //
1145 // load the values for the data point into the numeric array.
1146
1147 // updated for the MPI case
1148 if( get_MPIRank()==procNo ){
1149 if (getNumDataPointsPerSample()>0) {
1150 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1151 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1152 //
1153 // Check a valid sample number has been supplied
1154 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1155 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid sampleNo.");
1156 }
1157
1158 //
1159 // Check a valid data point number has been supplied
1160 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1161 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid dataPointNoInSample.");
1162 }
1163 // TODO: global error handling
1164 // create a view of the data if it is stored locally
1165 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1166 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1167
1168 // pack the data from the view into tmpData for MPI communication
1169 pos=0;
1170 switch( dataPointRank ){
1171 case 0 :
1172 tmpData[0] = getDataAtOffset(offset);
1173 break;
1174 case 1 :
1175 for( i=0; i<dataPointShape[0]; i++ )
1176 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1177 break;
1178 case 2 :
1179 for( i=0; i<dataPointShape[0]; i++ )
1180 for( j=0; j<dataPointShape[1]; j++, pos++ )
1181 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1182 break;
1183 case 3 :
1184 for( i=0; i<dataPointShape[0]; i++ )
1185 for( j=0; j<dataPointShape[1]; j++ )
1186 for( k=0; k<dataPointShape[2]; k++, pos++ )
1187 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1188 break;
1189 case 4 :
1190 for( i=0; i<dataPointShape[0]; i++ )
1191 for( j=0; j<dataPointShape[1]; j++ )
1192 for( k=0; k<dataPointShape[2]; k++ )
1193 for( l=0; l<dataPointShape[3]; l++, pos++ )
1194 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1195 break;
1196 }
1197 }
1198 }
1199 #ifdef PASO_MPI
1200 // broadcast the data to all other processes
1201 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1202 #endif
1203
1204 // unpack the data
1205 switch( dataPointRank ){
1206 case 0 :
1207 numArray[0]=tmpData[0];
1208 break;
1209 case 1 :
1210 for( i=0; i<dataPointShape[0]; i++ )
1211 numArray[i]=tmpData[i];
1212 break;
1213 case 2 :
1214 for( i=0; i<dataPointShape[0]; i++ )
1215 for( j=0; j<dataPointShape[1]; j++ )
1216 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1217 break;
1218 case 3 :
1219 for( i=0; i<dataPointShape[0]; i++ )
1220 for( j=0; j<dataPointShape[1]; j++ )
1221 for( k=0; k<dataPointShape[2]; k++ )
1222 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1223 break;
1224 case 4 :
1225 for( i=0; i<dataPointShape[0]; i++ )
1226 for( j=0; j<dataPointShape[1]; j++ )
1227 for( k=0; k<dataPointShape[2]; k++ )
1228 for( l=0; l<dataPointShape[3]; l++ )
1229 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1230 break;
1231 }
1232
1233 delete [] tmpData;
1234 //
1235 // return the loaded array
1236 return numArray;
1237 }
1238
1239
1240 boost::python::numeric::array
1241 Data::integrate_const() const
1242 {
1243 if (isLazy())
1244 {
1245 throw DataException("Error - cannot integrate lazy data using the const version of integrate().");
1246 }
1247 return integrateWorker();
1248 }
1249
1250 boost::python::numeric::array
1251 Data::integrate()
1252 {
1253 if (isLazy())
1254 {
1255 expand();
1256 }
1257 return integrateWorker();
1258 }
1259
1260
1261
1262 boost::python::numeric::array
1263 Data::integrateWorker() const
1264 {
1265 int index;
1266 int rank = getDataPointRank();
1267 DataTypes::ShapeType shape = getDataPointShape();
1268 int dataPointSize = getDataPointSize();
1269
1270 //
1271 // calculate the integral values
1272 vector<double> integrals(dataPointSize);
1273 vector<double> integrals_local(dataPointSize);
1274 const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
1275 if (dom==0)
1276 {
1277 throw DataException("Can not integrate over non-continuous domains.");
1278 }
1279 #ifdef PASO_MPI
1280 dom->setToIntegrals(integrals_local,*this);
1281 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1282 double *tmp = new double[dataPointSize];
1283 double *tmp_local = new double[dataPointSize];
1284 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1285 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1286 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1287 delete[] tmp;
1288 delete[] tmp_local;
1289 #else
1290 dom->setToIntegrals(integrals,*this);
1291 #endif
1292
1293 //
1294 // create the numeric array to be returned
1295 // and load the array with the integral values
1296 boost::python::numeric::array bp_array(1.0);
1297 if (rank==0) {
1298 bp_array.resize(1);
1299 index = 0;
1300 bp_array[0] = integrals[index];
1301 }
1302 if (rank==1) {
1303 bp_array.resize(shape[0]);
1304 for (int i=0; i<shape[0]; i++) {
1305 index = i;
1306 bp_array[i] = integrals[index];
1307 }
1308 }
1309 if (rank==2) {
1310 bp_array.resize(shape[0],shape[1]);
1311 for (int i=0; i<shape[0]; i++) {
1312 for (int j=0; j<shape[1]; j++) {
1313 index = i + shape[0] * j;
1314 bp_array[make_tuple(i,j)] = integrals[index];
1315 }
1316 }
1317 }
1318 if (rank==3) {
1319 bp_array.resize(shape[0],shape[1],shape[2]);
1320 for (int i=0; i<shape[0]; i++) {
1321 for (int j=0; j<shape[1]; j++) {
1322 for (int k=0; k<shape[2]; k++) {
1323 index = i + shape[0] * ( j + shape[1] * k );
1324 bp_array[make_tuple(i,j,k)] = integrals[index];
1325 }
1326 }
1327 }
1328 }
1329 if (rank==4) {
1330 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1331 for (int i=0; i<shape[0]; i++) {
1332 for (int j=0; j<shape[1]; j++) {
1333 for (int k=0; k<shape[2]; k++) {
1334 for (int l=0; l<shape[3]; l++) {
1335 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1336 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1337 }
1338 }
1339 }
1340 }
1341 }
1342
1343 //
1344 // return the loaded array
1345 return bp_array;
1346 }
1347
1348 Data
1349 Data::sin() const
1350 {
1351 MAKELAZYOP(SIN)
1352 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1353 }
1354
1355 Data
1356 Data::cos() const
1357 {
1358 MAKELAZYOP(COS)
1359 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1360 }
1361
1362 Data
1363 Data::tan() const
1364 {
1365 MAKELAZYOP(TAN)
1366 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1367 }
1368
1369 Data
1370 Data::asin() const
1371 {
1372 MAKELAZYOP(ASIN)
1373 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1374 }
1375
1376 Data
1377 Data::acos() const
1378 {
1379 MAKELAZYOP(ACOS)
1380 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1381 }
1382
1383
1384 Data
1385 Data::atan() const
1386 {
1387 MAKELAZYOP(ATAN)
1388 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1389 }
1390
1391 Data
1392 Data::sinh() const
1393 {
1394 MAKELAZYOP(SINH)
1395 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1396 }
1397
1398 Data
1399 Data::cosh() const
1400 {
1401 MAKELAZYOP(COSH)
1402 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1403 }
1404
1405 Data
1406 Data::tanh() const
1407 {
1408 MAKELAZYOP(TANH)
1409 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1410 }
1411
1412
1413 Data
1414 Data::erf() const
1415 {
1416 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1417 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1418 #else
1419 MAKELAZYOP(ERF)
1420 return C_TensorUnaryOperation(*this, ::erf);
1421 #endif
1422 }
1423
1424 Data
1425 Data::asinh() const
1426 {
1427 MAKELAZYOP(ASINH)
1428 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1429 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1430 #else
1431 return C_TensorUnaryOperation(*this, ::asinh);
1432 #endif
1433 }
1434
1435 Data
1436 Data::acosh() const
1437 {
1438 MAKELAZYOP(ACOSH)
1439 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1440 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1441 #else
1442 return C_TensorUnaryOperation(*this, ::acosh);
1443 #endif
1444 }
1445
1446 Data
1447 Data::atanh() const
1448 {
1449 MAKELAZYOP(ATANH)
1450 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1451 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1452 #else
1453 return C_TensorUnaryOperation(*this, ::atanh);
1454 #endif
1455 }
1456
1457 Data
1458 Data::log10() const
1459 {
1460 MAKELAZYOP(LOG10)
1461 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1462 }
1463
1464 Data
1465 Data::log() const
1466 {
1467 MAKELAZYOP(LOG)
1468 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1469 }
1470
1471 Data
1472 Data::sign() const
1473 {
1474 MAKELAZYOP(SIGN)
1475 return C_TensorUnaryOperation(*this, escript::fsign);
1476 }
1477
1478 Data
1479 Data::abs() const
1480 {
1481 MAKELAZYOP(ABS)
1482 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1483 }
1484
1485 Data
1486 Data::neg() const
1487 {
1488 MAKELAZYOP(NEG)
1489 return C_TensorUnaryOperation(*this, negate<double>());
1490 }
1491
1492 Data
1493 Data::pos() const
1494 {
1495 // not doing lazy check here is deliberate.
1496 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1497 Data result;
1498 // perform a deep copy
1499 result.copy(*this);
1500 return result;
1501 }
1502
1503 Data
1504 Data::exp() const
1505 {
1506 MAKELAZYOP(EXP)
1507 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1508 }
1509
1510 Data
1511 Data::sqrt() const
1512 {
1513 MAKELAZYOP(SQRT)
1514 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1515 }
1516
1517 double
1518 Data::Lsup_const() const
1519 {
1520 if (isLazy())
1521 {
1522 throw DataException("Error - cannot compute Lsup of lazy data using the const version of Lsup().");
1523 }
1524 return LsupWorker();
1525 }
1526
1527 double
1528 Data::Lsup()
1529 {
1530 if (isLazy())
1531 {
1532 resolve();
1533 }
1534 return LsupWorker();
1535 }
1536
1537 double
1538 Data::sup_const() const
1539 {
1540 if (isLazy())
1541 {
1542 throw DataException("Error - cannot compute sup of lazy data using the const version of sup().");
1543 }
1544 return supWorker();
1545 }
1546
1547 double
1548 Data::sup()
1549 {
1550 if (isLazy())
1551 {
1552 resolve();
1553 }
1554 return supWorker();
1555 }
1556
1557 double
1558 Data::inf_const() const
1559 {
1560 if (isLazy())
1561 {
1562 throw DataException("Error - cannot compute inf of lazy data using the const version of inf().");
1563 }
1564 return infWorker();
1565 }
1566
1567 double
1568 Data::inf()
1569 {
1570 if (isLazy())
1571 {
1572 resolve();
1573 }
1574 return infWorker();
1575 }
1576
1577 double
1578 Data::LsupWorker() const
1579 {
1580 double localValue;
1581 //
1582 // set the initial absolute maximum value to zero
1583
1584 AbsMax abs_max_func;
1585 localValue = algorithm(abs_max_func,0);
1586 #ifdef PASO_MPI
1587 double globalValue;
1588 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1589 return globalValue;
1590 #else
1591 return localValue;
1592 #endif
1593 }
1594
1595 double
1596 Data::supWorker() const
1597 {
1598 double localValue;
1599 //
1600 // set the initial maximum value to min possible double
1601 FMax fmax_func;
1602 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1603 #ifdef PASO_MPI
1604 double globalValue;
1605 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1606 return globalValue;
1607 #else
1608 return localValue;
1609 #endif
1610 }
1611
1612 double
1613 Data::infWorker() const
1614 {
1615 double localValue;
1616 //
1617 // set the initial minimum value to max possible double
1618 FMin fmin_func;
1619 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1620 #ifdef PASO_MPI
1621 double globalValue;
1622 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1623 return globalValue;
1624 #else
1625 return localValue;
1626 #endif
1627 }
1628
1629 /* TODO */
1630 /* global reduction */
1631 Data
1632 Data::maxval() const
1633 {
1634 if (isLazy())
1635 {
1636 Data temp(*this); // to get around the fact that you can't resolve a const Data
1637 temp.resolve();
1638 return temp.maxval();
1639 }
1640 //
1641 // set the initial maximum value to min possible double
1642 FMax fmax_func;
1643 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1644 }
1645
1646 Data
1647 Data::minval() const
1648 {
1649 if (isLazy())
1650 {
1651 Data temp(*this); // to get around the fact that you can't resolve a const Data
1652 temp.resolve();
1653 return temp.minval();
1654 }
1655 //
1656 // set the initial minimum value to max possible double
1657 FMin fmin_func;
1658 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1659 }
1660
1661 Data
1662 Data::swapaxes(const int axis0, const int axis1) const
1663 {
1664 if (isLazy())
1665 {
1666 Data temp(*this);
1667 temp.resolve();
1668 return temp.swapaxes(axis0,axis1);
1669 }
1670 int axis0_tmp,axis1_tmp;
1671 DataTypes::ShapeType s=getDataPointShape();
1672 DataTypes::ShapeType ev_shape;
673 // Build the output shape by exchanging the dimensions at axis0 and axis1
674 // and leaving all other entries of the shape vector unchanged.
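// Worked example (illustrative): for shape (2,3,4), swapaxes(0,2) builds the
// output shape (4,3,2); only the two requested axes are exchanged.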
1675 int rank=getDataPointRank();
1676 if (rank<2) {
1677 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1678 }
1679 if (axis0<0 || axis0>rank-1) {
680 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
1681 }
1682 if (axis1<0 || axis1>rank-1) {
683 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1684 }
1685 if (axis0 == axis1) {
1686 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1687 }
1688 if (axis0 > axis1) {
1689 axis0_tmp=axis1;
1690 axis1_tmp=axis0;
1691 } else {
1692 axis0_tmp=axis0;
1693 axis1_tmp=axis1;
1694 }
1695 for (int i=0; i<rank; i++) {
1696 if (i == axis0_tmp) {
1697 ev_shape.push_back(s[axis1_tmp]);
1698 } else if (i == axis1_tmp) {
1699 ev_shape.push_back(s[axis0_tmp]);
1700 } else {
1701 ev_shape.push_back(s[i]);
1702 }
1703 }
1704 Data ev(0.,ev_shape,getFunctionSpace());
1705 ev.typeMatchRight(*this);
1706 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1707 return ev;
1708
1709 }
1710
1711 Data
1712 Data::symmetric() const
1713 {
1714 // check input
1715 DataTypes::ShapeType s=getDataPointShape();
1716 if (getDataPointRank()==2) {
1717 if(s[0] != s[1])
1718 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1719 }
1720 else if (getDataPointRank()==4) {
1721 if(!(s[0] == s[2] && s[1] == s[3]))
1722 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1723 }
1724 else {
1725 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1726 }
1727 MAKELAZYOP(SYM)
1728 Data ev(0.,getDataPointShape(),getFunctionSpace());
1729 ev.typeMatchRight(*this);
1730 m_data->symmetric(ev.m_data.get());
1731 return ev;
1732 }
1733
1734 Data
1735 Data::nonsymmetric() const
1736 {
1737 MAKELAZYOP(NSYM)
1738 // check input
1739 DataTypes::ShapeType s=getDataPointShape();
1740 if (getDataPointRank()==2) {
1741 if(s[0] != s[1])
1742 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1743 DataTypes::ShapeType ev_shape;
1744 ev_shape.push_back(s[0]);
1745 ev_shape.push_back(s[1]);
1746 Data ev(0.,ev_shape,getFunctionSpace());
1747 ev.typeMatchRight(*this);
1748 m_data->nonsymmetric(ev.m_data.get());
1749 return ev;
1750 }
1751 else if (getDataPointRank()==4) {
1752 if(!(s[0] == s[2] && s[1] == s[3]))
1753 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1754 DataTypes::ShapeType ev_shape;
1755 ev_shape.push_back(s[0]);
1756 ev_shape.push_back(s[1]);
1757 ev_shape.push_back(s[2]);
1758 ev_shape.push_back(s[3]);
1759 Data ev(0.,ev_shape,getFunctionSpace());
1760 ev.typeMatchRight(*this);
1761 m_data->nonsymmetric(ev.m_data.get());
1762 return ev;
1763 }
1764 else {
1765 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1766 }
1767 }
1768
1769 Data
1770 Data::trace(int axis_offset) const
1771 {
1772 MAKELAZYOPOFF(TRACE,axis_offset)
1773 if ((axis_offset<0) || (axis_offset>getDataPointRank()))
1774 {
1775 throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
1776 }
1777 DataTypes::ShapeType s=getDataPointShape();
1778 if (getDataPointRank()==2) {
1779 DataTypes::ShapeType ev_shape;
1780 Data ev(0.,ev_shape,getFunctionSpace());
1781 ev.typeMatchRight(*this);
1782 m_data->trace(ev.m_data.get(), axis_offset);
1783 return ev;
1784 }
1785 if (getDataPointRank()==3) {
1786 DataTypes::ShapeType ev_shape;
1787 if (axis_offset==0) {
1788 int s2=s[2];
1789 ev_shape.push_back(s2);
1790 }
1791 else if (axis_offset==1) {
1792 int s0=s[0];
1793 ev_shape.push_back(s0);
1794 }
1795 Data ev(0.,ev_shape,getFunctionSpace());
1796 ev.typeMatchRight(*this);
1797 m_data->trace(ev.m_data.get(), axis_offset);
1798 return ev;
1799 }
1800 if (getDataPointRank()==4) {
1801 DataTypes::ShapeType ev_shape;
1802 if (axis_offset==0) {
1803 ev_shape.push_back(s[2]);
1804 ev_shape.push_back(s[3]);
1805 }
1806 else if (axis_offset==1) {
1807 ev_shape.push_back(s[0]);
1808 ev_shape.push_back(s[3]);
1809 }
1810 else if (axis_offset==2) {
1811 ev_shape.push_back(s[0]);
1812 ev_shape.push_back(s[1]);
1813 }
1814 Data ev(0.,ev_shape,getFunctionSpace());
1815 ev.typeMatchRight(*this);
1816 m_data->trace(ev.m_data.get(), axis_offset);
1817 return ev;
1818 }
1819 else {
1820 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1821 }
1822 }
1823
1824 Data
1825 Data::transpose(int axis_offset) const
1826 {
1827 MAKELAZYOPOFF(TRANS,axis_offset)
1828 DataTypes::ShapeType s=getDataPointShape();
1829 DataTypes::ShapeType ev_shape;
1830 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset],
1831 // which goes through all shape vector elements starting with axis_offset (wrapping around to 0 at index=rank)
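// Worked example (illustrative): for shape (2,3,4) and axis_offset=1 the loop
// below produces the output shape (3,4,2), i.e. s[1:]+s[:1].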
1832 int rank=getDataPointRank();
1833 if (axis_offset<0 || axis_offset>rank) {
1834 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1835 }
1836 for (int i=0; i<rank; i++) {
1837 int index = (axis_offset+i)%rank;
1838 ev_shape.push_back(s[index]); // Append to new shape
1839 }
1840 Data ev(0.,ev_shape,getFunctionSpace());
1841 ev.typeMatchRight(*this);
1842 m_data->transpose(ev.m_data.get(), axis_offset);
1843 return ev;
1844 }
1845
1846 Data
1847 Data::eigenvalues() const
1848 {
1849 if (isLazy())
1850 {
1851 Data temp(*this); // to get around the fact that you can't resolve a const Data
1852 temp.resolve();
1853 return temp.eigenvalues();
1854 }
1855 // check input
1856 DataTypes::ShapeType s=getDataPointShape();
1857 if (getDataPointRank()!=2)
1858 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1859 if(s[0] != s[1])
1860 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1861 // create return
1862 DataTypes::ShapeType ev_shape(1,s[0]);
1863 Data ev(0.,ev_shape,getFunctionSpace());
1864 ev.typeMatchRight(*this);
1865 m_data->eigenvalues(ev.m_data.get());
1866 return ev;
1867 }
1868
1869 const boost::python::tuple
1870 Data::eigenvalues_and_eigenvectors(const double tol) const
1871 {
1872 if (isLazy())
1873 {
1874 Data temp(*this); // to get around the fact that you can't resolve a const Data
1875 temp.resolve();
1876 return temp.eigenvalues_and_eigenvectors(tol);
1877 }
1878 DataTypes::ShapeType s=getDataPointShape();
1879 if (getDataPointRank()!=2)
1880 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1881 if(s[0] != s[1])
1882 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1883 // create return
1884 DataTypes::ShapeType ev_shape(1,s[0]);
1885 Data ev(0.,ev_shape,getFunctionSpace());
1886 ev.typeMatchRight(*this);
1887 DataTypes::ShapeType V_shape(2,s[0]);
1888 Data V(0.,V_shape,getFunctionSpace());
1889 V.typeMatchRight(*this);
1890 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1891 return make_tuple(boost::python::object(ev),boost::python::object(V));
1892 }
1893
1894 const boost::python::tuple
1895 Data::minGlobalDataPoint() const
1896 {
1897 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() as boost::make_tuple causes an
1898 // abort (for unknown reasons) if there are openmp directives with it in the
1899 // surrounding function
1900
1901 int DataPointNo;
1902 int ProcNo;
1903 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1904 return make_tuple(ProcNo,DataPointNo);
1905 }
1906
1907 void
1908 Data::calc_minGlobalDataPoint(int& ProcNo,
1909 int& DataPointNo) const
1910 {
1911 if (isLazy())
1912 {
1913 Data temp(*this); // to get around the fact that you can't resolve a const Data
1914 temp.resolve();
1915 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1916 }
1917 int i,j;
1918 int lowi=0,lowj=0;
1919 double min=numeric_limits<double>::max();
1920
1921 Data temp=minval();
1922
1923 int numSamples=temp.getNumSamples();
1924 int numDPPSample=temp.getNumDataPointsPerSample();
1925
1926 double next,local_min;
1927 int local_lowi=0,local_lowj=0;
1928
1929 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1930 {
1931 local_min=min;
1932 #pragma omp for private(i,j) schedule(static)
1933 for (i=0; i<numSamples; i++) {
1934 for (j=0; j<numDPPSample; j++) {
1935 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1936 if (next<local_min) {
1937 local_min=next;
1938 local_lowi=i;
1939 local_lowj=j;
1940 }
1941 }
1942 }
1943 #pragma omp critical
1944 if (local_min<min) {
1945 min=local_min;
1946 lowi=local_lowi;
1947 lowj=local_lowj;
1948 }
1949 }
1950
1951 #ifdef PASO_MPI
1952 // determine the processor on which the minimum occurs
1953 next = temp.getDataPoint(lowi,lowj);
1954 int lowProc = 0;
1955 double *globalMins = new double[get_MPISize()+1];
1956 int error;
1957 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1958
1959 if( get_MPIRank()==0 ){
1960 next = globalMins[lowProc];
1961 for( i=1; i<get_MPISize(); i++ )
1962 if( next>globalMins[i] ){
1963 lowProc = i;
1964 next = globalMins[i];
1965 }
1966 }
1967 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() ); // lowProc is an int, so broadcast it as MPI_INT
1968
1969 delete [] globalMins;
1970 ProcNo = lowProc;
1971 #else
1972 ProcNo = 0;
1973 #endif
1974 DataPointNo = lowj + lowi * numDPPSample;
1975 }
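 // Illustrative note (added for clarity, not part of the original source): the search is
 // run on minval() of this object, so each data point contributes its smallest component.
 // For example, with 4 data points per sample, a minimum found at sample 3, point 2 is
 // reported as DataPointNo = 2 + 3*4 = 14, together with the MPI rank that owns it
 // (always rank 0 without MPI).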
1976
1977 void
1978 Data::saveDX(std::string fileName) const
1979 {
1980 if (isEmpty())
1981 {
1982 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1983 }
1984 if (isLazy())
1985 {
1986 Data temp(*this); // to get around the fact that you can't resolve a const Data
1987 temp.resolve();
1988 temp.saveDX(fileName);
1989 return;
1990 }
1991 boost::python::dict args;
1992 args["data"]=boost::python::object(this);
1993 getDomain()->saveDX(fileName,args);
1994 return;
1995 }
1996
1997 void
1998 Data::saveVTK(std::string fileName) const
1999 {
2000 if (isEmpty())
2001 {
2002 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2003 }
2004 if (isLazy())
2005 {
2006 Data temp(*this); // to get around the fact that you can't resolve a const Data
2007 temp.resolve();
2008 temp.saveVTK(fileName);
2009 return;
2010 }
2011 boost::python::dict args;
2012 args["data"]=boost::python::object(this);
2013 getDomain()->saveVTK(fileName,args);
2014 return;
2015 }
2016
2017 Data&
2018 Data::operator+=(const Data& right)
2019 {
2020 if (isProtected()) {
2021 throw DataException("Error - attempt to update protected Data object.");
2022 }
2023 MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
2024 binaryOp(right,plus<double>());
2025 return (*this);
2026 }
2027
2028 Data&
2029 Data::operator+=(const boost::python::object& right)
2030 {
2031 if (isProtected()) {
2032 throw DataException("Error - attempt to update protected Data object.");
2033 }
2034 Data tmp(right,getFunctionSpace(),false);
2035 MAKELAZYBINSELF(tmp,ADD)
2036 binaryOp(tmp,plus<double>());
2037 return (*this);
2038 }
2039
2040 // Hmmm, operator= makes a deep copy but the copy constructor does not?
2041 Data&
2042 Data::operator=(const Data& other)
2043 {
2044 copy(other);
2045 return (*this);
2046 }
2047
2048 Data&
2049 Data::operator-=(const Data& right)
2050 {
2051 if (isProtected()) {
2052 throw DataException("Error - attempt to update protected Data object.");
2053 }
2054 MAKELAZYBINSELF(right,SUB)
2055 binaryOp(right,minus<double>());
2056 return (*this);
2057 }
2058
2059 Data&
2060 Data::operator-=(const boost::python::object& right)
2061 {
2062 if (isProtected()) {
2063 throw DataException("Error - attempt to update protected Data object.");
2064 }
2065 Data tmp(right,getFunctionSpace(),false);
2066 MAKELAZYBINSELF(tmp,SUB)
2067 binaryOp(tmp,minus<double>());
2068 return (*this);
2069 }
2070
2071 Data&
2072 Data::operator*=(const Data& right)
2073 {
2074 if (isProtected()) {
2075 throw DataException("Error - attempt to update protected Data object.");
2076 }
2077 MAKELAZYBINSELF(right,MUL)
2078 binaryOp(right,multiplies<double>());
2079 return (*this);
2080 }
2081
2082 Data&
2083 Data::operator*=(const boost::python::object& right)
2084 {
2085 if (isProtected()) {
2086 throw DataException("Error - attempt to update protected Data object.");
2087 }
2088 Data tmp(right,getFunctionSpace(),false);
2089 MAKELAZYBINSELF(tmp,MUL)
2090 binaryOp(tmp,multiplies<double>());
2091 return (*this);
2092 }
2093
2094 Data&
2095 Data::operator/=(const Data& right)
2096 {
2097 if (isProtected()) {
2098 throw DataException("Error - attempt to update protected Data object.");
2099 }
2100 MAKELAZYBINSELF(right,DIV)
2101 binaryOp(right,divides<double>());
2102 return (*this);
2103 }
2104
2105 Data&
2106 Data::operator/=(const boost::python::object& right)
2107 {
2108 if (isProtected()) {
2109 throw DataException("Error - attempt to update protected Data object.");
2110 }
2111 Data tmp(right,getFunctionSpace(),false);
2112 MAKELAZYBINSELF(tmp,DIV)
2113 binaryOp(tmp,divides<double>());
2114 return (*this);
2115 }
2116
2117 Data
2118 Data::rpowO(const boost::python::object& left) const
2119 {
2120 Data left_d(left,*this);
2121 return left_d.powD(*this);
2122 }
2123
2124 Data
2125 Data::powO(const boost::python::object& right) const
2126 {
2127 Data tmp(right,getFunctionSpace(),false);
2128 return powD(tmp);
2129 }
2130
2131 Data
2132 Data::powD(const Data& right) const
2133 {
2134 MAKELAZYBIN(right,POW)
2135 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2136 }
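 // Illustrative note (added for clarity, not part of the original source): powO covers
 // d**x for a Python scalar or array x, while rpowO presumably backs the reflected
 // __rpow__, so an expression such as 2.0**d wraps 2.0 as a Data object compatible with d
 // and then applies powD, raising 2.0 to each component of d.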
2137
2138 //
2139 // NOTE: It is essential to specify the namespace this operator belongs to
2140 Data
2141 escript::operator+(const Data& left, const Data& right)
2142 {
2143 MAKELAZYBIN2(left,right,ADD)
2144 return C_TensorBinaryOperation(left, right, plus<double>());
2145 }
2146
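 // Illustrative note (added for clarity, not part of the original source): without the
 // escript:: qualifier, e.g.
 //     Data operator+(const Data& left, const Data& right) { ... }
 // would define a brand-new overload in the enclosing namespace rather than the
 // escript::operator+ declared in Data.h, typically showing up as an undefined symbol
 // at link time for callers of the declared operator.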
2147 //
2148 // NOTE: It is essential to specify the namespace this operator belongs to
2149 Data
2150 escript::operator-(const Data& left, const Data& right)
2151 {
2152 MAKELAZYBIN2(left,right,SUB)
2153 return C_TensorBinaryOperation(left, right, minus<double>());
2154 }
2155
2156 //
2157 // NOTE: It is essential to specify the namespace this operator belongs to
2158 Data
2159 escript::operator*(const Data& left, const Data& right)
2160 {
2161 MAKELAZYBIN2(left,right,MUL)
2162 return C_TensorBinaryOperation(left, right, multiplies<double>());
2163 }
2164
2165 //
2166 // NOTE: It is essential to specify the namespace this operator belongs to
2167 Data
2168 escript::operator/(const Data& left, const Data& right)
2169 {
2170 MAKELAZYBIN2(left,right,DIV)
2171 return C_TensorBinaryOperation(left, right, divides<double>());
2172 }
2173
2174 //
2175 // NOTE: It is essential to specify the namespace this operator belongs to
2176 Data
2177 escript::operator+(const Data& left, const boost::python::object& right)
2178 {
2179 Data tmp(right,left.getFunctionSpace(),false);
2180 MAKELAZYBIN2(left,tmp,ADD)
2181 return left+tmp;
2182 }
2183
2184 //
2185 // NOTE: It is essential to specify the namespace this operator belongs to
2186 Data
2187 escript::operator-(const Data& left, const boost::python::object& right)
2188 {
2189 Data tmp(right,left.getFunctionSpace(),false);
2190 MAKELAZYBIN2(left,tmp,SUB)
2191 return left-tmp;
2192 }
2193
2194 //
2195 // NOTE: It is essential to specify the namespace this operator belongs to
2196 Data
2197 escript::operator*(const Data& left, const boost::python::object& right)
2198 {
2199 Data tmp(right,left.getFunctionSpace(),false);
2200 MAKELAZYBIN2(left,tmp,MUL)
2201 return left*tmp;
2202 }
2203
2204 //
2205 // NOTE: It is essential to specify the namespace this operator belongs to
2206 Data
2207 escript::operator/(const Data& left, const boost::python::object& right)
2208 {
2209 Data tmp(right,left.getFunctionSpace(),false);
2210 MAKELAZYBIN2(left,tmp,DIV)
2211 return left/tmp;
2212 }
2213
2214 //
2215 // NOTE: It is essential to specify the namespace this operator belongs to
2216 Data
2217 escript::operator+(const boost::python::object& left, const Data& right)
2218 {
2219 Data tmp(left,right.getFunctionSpace(),false);
2220 MAKELAZYBIN2(tmp,right,ADD)
2221 return tmp+right;
2222 }
2223
2224 //
2225 // NOTE: It is essential to specify the namespace this operator belongs to
2226 Data
2227 escript::operator-(const boost::python::object& left, const Data& right)
2228 {
2229 Data tmp(left,right.getFunctionSpace(),false);
2230 MAKELAZYBIN2(tmp,right,SUB)
2231 return tmp-right;
2232 }
2233
2234 //
2235 // NOTE: It is essential to specify the namespace this operator belongs to
2236 Data
2237 escript::operator*(const boost::python::object& left, const Data& right)
2238 {
2239 Data tmp(left,right.getFunctionSpace(),false);
2240 MAKELAZYBIN2(tmp,right,MUL)
2241 return tmp*right;
2242 }
2243
2244 //
2245 // NOTE: It is essential to specify the namespace this operator belongs to
2246 Data
2247 escript::operator/(const boost::python::object& left, const Data& right)
2248 {
2249 Data tmp(left,right.getFunctionSpace(),false);
2250 MAKELAZYBIN2(tmp,right,DIV)
2251 return tmp/right;
2252 }
2253
2254
2255 /* TODO */
2256 /* global reduction */
2257 Data
2258 Data::getItem(const boost::python::object& key) const
2259 {
2260
2261 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2262
2263 if (slice_region.size()!=getDataPointRank()) {
2264 throw DataException("Error - slice size does not match Data rank.");
2265 }
2266
2267 return getSlice(slice_region);
2268 }
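 // Illustrative note (added for clarity, not part of the original source): getItem is
 // presumably wired to Python's __getitem__, so an expression such as d[0] or d[:,1] on a
 // rank-2 Data is translated by DataTypes::getSliceRegion into a region with one entry per
 // rank and handed to getSlice; a key whose rank differs from the Data's rank triggers the
 // DataException above.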
2269
2270 /* TODO */
2271 /* global reduction */
2272 Data
2273 Data::getSlice(const DataTypes::RegionType& region) const
2274 {
2275 return Data(*this,region);
2276 }
2277
2278 /* TODO */
2279 /* global reduction */
2280 void
2281 Data::setItemO(const boost::python::object& key,
2282 const boost::python::object& value)
2283 {
2284 Data tempData(value,getFunctionSpace());
2285 setItemD(key,tempData);
2286 }
2287
2288 void
2289 Data::setItemD(const boost::python::object& key,
2290 const Data& value)
2291 {
2292 // const DataArrayView& view=getPointDataView();
2293
2294 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2295 if (slice_region.size()!=getDataPointRank()) {
2296 throw DataException("Error - slice size does not match Data rank.");
2297 }
2298 if (getFunctionSpace()!=value.getFunctionSpace()) {
2299 setSlice(Data(value,getFunctionSpace()),slice_region);
2300 } else {
2301 setSlice(value,slice_region);
2302 }
2303 }
2304
2305 void
2306 Data::setSlice(const Data& value,
2307 const DataTypes::RegionType& region)
2308 {
2309 if (isProtected()) {
2310 throw DataException("Error - attempt to update protected Data object.");
2311 }
2312 FORCERESOLVE;
2313 /* if (isLazy())
2314 {
2315 throw DataException("Error - setSlice not permitted on lazy data.");
2316 }*/
2317 Data tempValue(value);
2318 typeMatchLeft(tempValue);
2319 typeMatchRight(tempValue);
2320 getReady()->setSlice(tempValue.m_data.get(),region);
2321 }
2322
2323 void
2324 Data::typeMatchLeft(Data& right) const
2325 {
2326 if (right.isLazy() && !isLazy())
2327 {
2328 right.resolve();
2329 }
2330 if (isExpanded()){
2331 right.expand();
2332 } else if (isTagged()) {
2333 if (right.isConstant()) {
2334 right.tag();
2335 }
2336 }
2337 }
2338
2339 void
2340 Data::typeMatchRight(const Data& right)
2341 {
2342 if (isLazy() && !right.isLazy())
2343 {
2344 resolve();
2345 }
2346 if (isTagged()) {
2347 if (right.isExpanded()) {
2348 expand();
2349 }
2350 } else if (isConstant()) {
2351 if (right.isExpanded()) {
2352 expand();
2353 } else if (right.isTagged()) {
2354 tag();
2355 }
2356 }
2357 }
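 // Illustrative note (added for clarity, not part of the original source): together,
 // typeMatchLeft and typeMatchRight enforce the storage promotion order
 // constant -> tagged -> expanded. In setSlice above, a constant value written into a
 // tagged target is tag()'d first, a value written into an expanded target is expand()'d,
 // and lazy operands are resolved as needed before the representations are compared.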
2358
2359 void
2360 Data::setTaggedValueByName(std::string name,
2361 const boost::python::object& value)
2362 {
2363 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2364 FORCERESOLVE;
2365 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2366 setTaggedValue(tagKey,value);
2367 }
2368 }
2369 void
2370 Data::setTaggedValue(int tagKey,
2371 const boost::python::object& value)
2372 {
2373 if (isProtected()) {
2374 throw DataException("Error - attempt to update protected Data object.");
2375 }
2376 //
2377 // Ensure underlying data object is of type DataTagged
2378 FORCERESOLVE;
2379 if (isConstant()) tag();
2380 numeric::array asNumArray(value);
2381
2382 // extract the shape of the numarray
2383 DataTypes::ShapeType tempShape;
2384 for (int i=0; i < asNumArray.getrank(); i++) {
2385 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2386 }
2387
2388 DataVector temp_data2;
2389 temp_data2.copyFromNumArray(asNumArray,1);
2390
2391 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2392 }
2393
2394
2395 void
2396 Data::setTaggedValueFromCPP(int tagKey,
2397 const DataTypes::ShapeType& pointshape,
2398 const DataTypes::ValueType& value,
2399 int dataOffset)
2400 {
2401 if (isProtected()) {
2402 throw DataException("Error - attempt to update protected Data object.");
2403 }
2404 //
2405 // Ensure underlying data object is of type DataTagged
2406 FORCERESOLVE;
2407 if (isConstant()) tag();
2408 //
2409 // Call DataAbstract::setTaggedValue
2410 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2411 }
2412
2413 int
2414 Data::getTagNumber(int dpno)
2415 {
2416 if (isEmpty())
2417 {
2418 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2419 }
2420 return getFunctionSpace().getTagFromDataPointNo(dpno);
2421 }
2422
2423
2424 ostream& escript::operator<<(ostream& o, const Data& data)
2425 {
2426 o << data.toString();
2427 return o;
2428 }
2429
2430 Data
2431 escript::C_GeneralTensorProduct(Data& arg_0,
2432 Data& arg_1,
2433 int axis_offset,
2434 int transpose)
2435 {
2436 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2437 // SM is the product of the last axis_offset entries in arg_0.getShape().
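 // Illustrative worked example (added for clarity, not part of the original source):
 // with transpose=0 and axis_offset=1, arg_0 of shape (3,4) and arg_1 of shape (4,5)
 // give SL=3, SM=4, SR=5, i.e. an ordinary matrix-matrix product with result shape (3,5);
 // with axis_offset=2, shapes (2,3,4) and (3,4,5) contract over the trailing (3,4) block
 // and produce a result of shape (2,5).
 #if 0
 // Hypothetical usage sketch; 'fs' stands for any FunctionSpace and is not defined here.
 DataTypes::ShapeType sq(2,3); // data point shape (3,3)
 Data a(1.0, sq, fs); // constant Data: every point is a 3x3 matrix of ones
 Data b(2.0, sq, fs);
 Data c = C_GeneralTensorProduct(a, b, /*axis_offset*/1, /*transpose*/0); // per-point matrix product, shape (3,3)
 #endif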
2438
2439 // deal with any lazy data
2440 // if (arg_0.isLazy()) {arg_0.resolve();}
2441 // if (arg_1.isLazy()) {arg_1.resolve();}
2442 if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
2443 {
2444 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2445 return Data(c);
2446 }
2447
2448 // Interpolate if necessary and find an appropriate function space
2449 Data arg_0_Z, arg_1_Z;
2450 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2451 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2452 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2453 arg_1_Z = Data(arg_1);
2454 }
2455 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2456 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2457 arg_0_Z =Data(arg_0);
2458 }
2459 else {
2460 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2461 }
2462 } else {
2463 arg_0_Z = Data(arg_0);
2464 arg_1_Z = Data(arg_1);
2465 }
2466 // Get rank and shape of inputs
2467 int rank0 = arg_0_Z.getDataPointRank();
2468 int rank1 = arg_1_Z.getDataPointRank();
2469 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2470 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2471
2472 // Prepare for the loops of the product and verify compatibility of shapes
2473 int start0=0, start1=0;
2474 if (transpose == 0) {}
2475 else if (transpose == 1) { start0 = axis_offset; }
2476 else if (transpose == 2) { start1 = rank1-axis_offset; }
2477 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2478
2479
2480 // Adjust the shapes for transpose
2481 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2482 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2483 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2484 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
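 // Illustrative note (added for clarity, not part of the original source): the index
 // rotation above realises the transpose argument by rotating the shapes only; the data
 // itself is not permuted, and matrix_matrix_product is passed transpose to adjust its
 // access pattern. With transpose=1 and axis_offset=1, a shape0 of (4,3) becomes
 // tmpShape0=(3,4), so the leading axis of arg_0 is the one contracted against arg_1;
 // transpose=2 rotates shape1 in the same way so that the trailing axes of arg_1 are
 // contracted instead.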
2485
2486 #if 0
2487 // For debugging: show shape after transpose
2488 char tmp[100];
2489 std::string shapeStr;
2490 shapeStr = "(";
2491 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2492 shapeStr += ")";
2493 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2494 shapeStr = "(";
2495 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2496 shapeStr += ")";
2497 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2498 #endif
2499
2500 // Prepare for the loops of the product
2501 int SL=1, SM=1, SR=1;
2502 for (int i=0; i<rank0-axis_offset; i++) {
2503 SL *= tmpShape0[i];
2504 }
2505 for (int i=rank0-axis_offset; i<rank0; i++) {
2506 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2507 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2508 }
2509 SM *= tmpShape0[i];
2510 }
2511 for (int i=axis_offset; i<rank1; i++) {
2512 SR *= tmpShape1[i];
2513 }
2514
2515 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2516 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2517 { // block to limit the scope of out_index
2518 int out_index=0;
2519 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2520 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2521 }
2522
2523 if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
2524 {
2525 ostringstream os;
2526 os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
2527 throw DataException(os.str());
2528 }
2529
2530 // Declare output Data object
2531 Data res;
2532
2533 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2534 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2535 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2536 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2537 double *ptr_2 = &(res.getDataAtOffset(0));
2538 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2539 }
2540 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2541
2542 // Prepare the DataConstant input
2543 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2544 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2545
2546 // Borrow DataTagged input from Data object
2547 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2548 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2549
2550 // Prepare a DataTagged output 2
2551 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2552 res.tag();
2553 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2554 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2555
2556 // Prepare offset into DataConstant
2557 int offset_0 = tmp_0->getPointOffset(0,0);
2558 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2559 // Get the views
2560 // DataArrayView view_1 = tmp_1->getDefaultValue();
2561 // DataArrayView view_2 = tmp_2->getDefaultValue();
2562 // // Get the pointers to the actual data
2563 // double *ptr_1 = &((view_1.getData())[0]);
2564 // double *ptr_2 = &((view_2.getData())[0]);
2565
2566 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2567 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2568
2569
2570 // Compute an MVP for the default
2571 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2572 // Compute an MVP for each tag
2573 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2574 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2575 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2576 tmp_2->addTag(i->first);
2577 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2578 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2579 // double *ptr_1 = &view_1.getData(0);
2580 // double *ptr_2 = &view_2.getData(0);
2581
2582 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2583 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2584
2585 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2586 }
2587
2588 }
2589 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2590
2591 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2592 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2593 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2594 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2595 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2596 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2597 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2598 int sampleNo_1,dataPointNo_1;
2599 int numSamples_1 = arg_1_Z.getNumSamples();
2600 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2601 int offset_0 = tmp_0->getPointOffset(0,0);
2602 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2603 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2604 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2605 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2606 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2607 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2608 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2609 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2610 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2611 }
2612 }
2613
2614 }
2615 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2616
2617 // Borrow DataTagged input from Data object
2618 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2619 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2620
2621 // Prepare the DataConstant input
2622 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2623 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2624
2625 // Prepare a DataTagged output 2
2626 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2627 res.tag();
2628 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2629 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2630
2631 // Prepare offset into DataConstant
2632 int offset_1 = tmp_1->getPointOffset(0,0);
2633 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2634 // Get the views
2635 // DataArrayView view_0 = tmp_0->getDefaultValue();
2636 // DataArrayView view_2 = tmp_2->getDefaultValue();
2637 // // Get the pointers to the actual data
2638 // double *ptr_0 = &((view_0.getData())[0]);
2639 // double *ptr_2 = &((view_2.getData())[0]);
2640
2641 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2642 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2643
2644 // Compute an MVP for the default
2645 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2646 // Compute an MVP for each tag
2647 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2648 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2649 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2650 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2651 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2652 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2653 // double *ptr_0 = &view_0.getData(0);
2654 // double *ptr_2 = &view_2.getData(0);
2655
2656 tmp_2->addTag(i->first);
2657 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2658 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2659 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2660 }
2661
2662 }
2663 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2664
2665 // Borrow DataTagged input from Data object
2666 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2667 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2668
2669 // Borrow DataTagged input from Data object
2670 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2671 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2672
2673 // Prepare a DataTagged output 2
2674 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2675 res.tag(); // DataTagged output
2676 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2677 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2678
2679 // // Get the views
2680 // DataArrayView view_0 = tmp_0->getDefaultValue();
2681 // DataArrayView view_1 = tmp_1->getDefaultValue();
2682 // DataArrayView view_2 = tmp_2->getDefaultValue();
2683 // // Get the pointers to the actual data
2684 // double *ptr_0 = &((view_0.getData())[0]);
2685 // double *ptr_1 = &((view_1.getData())[0]);
2686 // double *ptr_2 = &((view_2.getData())[0]);
2687
2688 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2689 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2690 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2691
2692
2693 // Compute an MVP for the default
2694 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2695 // Merge the tags
2696 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2697 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2698 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2699 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2700 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2701 }
2702 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2703 tmp_2->addTag(i->first);
2704 }
2705 // Compute an MVP for each tag
2706 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2707 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2708 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2709 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2710 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2711 // double *ptr_0 = &view_0.getData(0);
2712 // double *ptr_1 = &view_1.getData(0);
2713 // double *ptr_2 = &view_2.getData(0);
2714
2715 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2716 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2717 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2718
2719 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2720 }
2721
2722 }
2723 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2724
2725 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2726 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2727 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2728 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2729 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2730 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2731 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2732 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2733 int sampleNo_0,dataPointNo_0;
2734 int numSamples_0 = arg_0_Z.getNumSamples();
2735 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2736 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2737 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2738 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2739 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2740 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2741 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2742 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2743 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2744 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2745 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2746 }
2747 }
2748
2749 }
2750 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2751
2752 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2753 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2754 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2755 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2756 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2757 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2758 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2759 int sampleNo_0,dataPointNo_0;
2760 int numSamples_0 = arg_0_Z.getNumSamples();
2761 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2762 int offset_1 = tmp_1->getPointOffset(0,0);
2763 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2764 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2765 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2766 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2767 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2768 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2769 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2770 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2771 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2772 }
2773 }
2774
2775
2776 }
2777 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2778
2779 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2780 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2781 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2782 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2783 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2784 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2785 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2786 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2787 int sampleNo_0,dataPointNo_0;
2788 int numSamples_0 = arg_0_Z.getNumSamples();
2789 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2790 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2791 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2792 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2793 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2794 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2795 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2796 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2797 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2798 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2799 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2800 }
2801 }
2802
2803 }
2804 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2805
2806 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2807 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2808 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2809 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2810 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2811 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2812 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2813 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2814 int sampleNo_0,dataPointNo_0;
2815 int numSamples_0 = arg_0_Z.getNumSamples();
2816 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2817 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2818 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2819 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2820 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2821 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2822 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2823 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2824 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2825 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2826 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2827 }
2828 }
2829
2830 }
2831 else {
2832 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2833 }
2834
2835 return res;
2836 }
2837
2838 DataAbstract*
2839 Data::borrowData() const
2840 {
2841 return m_data.get();
2842 }
2843
2844 // Not all that happy about returning a non-const from a const
2845 DataAbstract_ptr
2846 Data::borrowDataPtr() const
2847 {
2848 return m_data;
2849 }
2850
2851 // Not all that happy about returning a non-const from a const
2852 DataReady_ptr
2853 Data::borrowReadyPtr() const
2854 {
2855 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2856 EsysAssert((dr!=0), "Error - casting to DataReady.");
2857 return dr;
2858 }
2859
2860 std::string
2861 Data::toString() const
2862 {
2863 if (!m_data->isEmpty() &&
2864 !m_data->isLazy() &&
2865 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2866 {
2867 stringstream temp;
2868 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2869 return temp.str();
2870 }
2871 return m_data->toString();
2872 }
2873
2874
2875
2876 DataTypes::ValueType::const_reference
2877 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2878 {
2879 if (isLazy())
2880 {
2881 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2882 }
2883 return getReady()->getDataAtOffset(i);
2884 }
2885
2886
2887 DataTypes::ValueType::reference
2888 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2889 {
2890 // if (isLazy())
2891 // {
2892 // throw DataException("getDataAtOffset not permitted on lazy data.");
2893 // }
2894 FORCERESOLVE;
2895 return getReady()->getDataAtOffset(i);
2896 }
2897
2898 DataTypes::ValueType::const_reference
2899 Data::getDataPoint(int sampleNo, int dataPointNo) const
2900 {
2901 if (!isReady())
2902 {
2903 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2904 }
2905 else
2906 {
2907 const DataReady* dr=getReady();
2908 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2909 }
2910 }
2911
2912
2913 DataTypes::ValueType::reference
2914 Data::getDataPoint(int sampleNo, int dataPointNo)
2915 {
2916 FORCERESOLVE;
2917 if (!isReady())
2918 {
2919 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2920 }
2921 else
2922 {
2923 DataReady* dr=getReady();
2924 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2925 }
2926 }
2927
2928
2929 /* Member functions specific to the MPI implementation */
2930
2931 void
2932 Data::print()
2933 {
2934 int i,j;
2935
2936 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2937 for( i=0; i<getNumSamples(); i++ )
2938 {
2939 printf( "[%6d]", i );
2940 for( j=0; j<getNumDataPointsPerSample(); j++ )
2941 printf( "\t%10.7g", (getSampleData(i))[j] );
2942 printf( "\n" );
2943 }
2944 }
2945 void
2946 Data::dump(const std::string fileName) const
2947 {
2948 try
2949 {
2950 if (isLazy())
2951 {
2952 Data temp(*this); // this is to get a non-const object which we can resolve
2953 temp.resolve();
2954 temp.dump(fileName);
2955 }
2956 else
2957 {
2958 return m_data->dump(fileName);
2959 }
2960 }
2961 catch (std::exception& e)
2962 {
2963 cout << e.what() << endl;
2964 }
2965 }
2966
2967 int
2968 Data::get_MPISize() const
2969 {
2970 int size;
2971 #ifdef PASO_MPI
2972 int error;
2973 error = MPI_Comm_size( get_MPIComm(), &size );
2974 #else
2975 size = 1;
2976 #endif
2977 return size;
2978 }
2979
2980 int
2981 Data::get_MPIRank() const
2982 {
2983 int rank;
2984 #ifdef PASO_MPI
2985 int error;
2986 error = MPI_Comm_rank( get_MPIComm(), &rank );
2987 #else
2988 rank = 0;
2989 #endif
2990 return rank;
2991 }
2992
2993 MPI_Comm
2994 Data::get_MPIComm() const
2995 {
2996 #ifdef PASO_MPI
2997 return MPI_COMM_WORLD;
2998 #else
2999 return -1;
3000 #endif
3001 }
3002
3003
