
Contents of /trunk/escript/src/Data.cpp



Revision 2085
Mon Nov 24 00:45:48 2008 UTC by jfenwick
File size: 85934 bytes
Added c++ unit tests for new operations.
Added resolve to some operations in Data

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36
37 #include <boost/python/dict.hpp>
38 #include <boost/python/extract.hpp>
39 #include <boost/python/long.hpp>
40
41 using namespace std;
42 using namespace boost::python;
43 using namespace boost;
44 using namespace escript;
45
46 // Ensure the current object is not a DataLazy.
47 // The idea is that an optional warning could be added whenever a resolve is forced.
48 #define FORCERESOLVE if (isLazy()) {resolve();}
49
50 Data::Data()
51 {
52 //
53 // Default data is type DataEmpty
54 DataAbstract* temp=new DataEmpty();
55 m_data=temp->getPtr();
56 m_protected=false;
57 }
58
59 Data::Data(double value,
60 const tuple& shape,
61 const FunctionSpace& what,
62 bool expanded)
63 {
64 DataTypes::ShapeType dataPointShape;
65 for (int i = 0; i < shape.attr("__len__")(); ++i) {
66 dataPointShape.push_back(extract<const int>(shape[i]));
67 }
68
69 int len = DataTypes::noValues(dataPointShape);
70 DataVector temp_data(len,value,len);
71 initialise(temp_data, dataPointShape, what, expanded);
72 m_protected=false;
73 }
74
75 Data::Data(double value,
76 const DataTypes::ShapeType& dataPointShape,
77 const FunctionSpace& what,
78 bool expanded)
79 {
80 int len = DataTypes::noValues(dataPointShape);
81
82 DataVector temp_data(len,value,len);
83 // DataArrayView temp_dataView(temp_data, dataPointShape);
84
85 // initialise(temp_dataView, what, expanded);
86 initialise(temp_data, dataPointShape, what, expanded);
87
88 m_protected=false;
89 }
90
91 Data::Data(const Data& inData)
92 {
93 m_data=inData.m_data;
94 m_protected=inData.isProtected();
95 }
96
97
98 Data::Data(const Data& inData,
99 const DataTypes::RegionType& region)
100 {
101 DataAbstract_ptr dat=inData.m_data;
102 if (inData.isLazy())
103 {
104 dat=inData.m_data->resolve();
105 }
106 else
107 {
108 dat=inData.m_data;
109 }
110 //
111 // Create Data which is a slice of another Data
112 DataAbstract* tmp = dat->getSlice(region);
113 m_data=DataAbstract_ptr(tmp);
114 m_protected=false;
115 }
116
117 Data::Data(const Data& inData,
118 const FunctionSpace& functionspace)
119 {
120 if (inData.isEmpty())
121 {
122 throw DataException("Error - will not interpolate for instances of DataEmpty.");
123 }
124 if (inData.getFunctionSpace()==functionspace) {
125 m_data=inData.m_data;
126 }
127 else
128 {
129
130 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
131 if (!inData.probeInterpolation(functionspace))
132 { // Even though this is constant, we still need to check whether interpolation is allowed
133 throw FunctionSpaceException("Call to probeInterpolation returned false for DataConstant.");
134 }
135 // if the data is not lazy, this will just be a cast to DataReady
136 DataReady_ptr dr=inData.m_data->resolve();
137 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
138 m_data=DataAbstract_ptr(dc);
139 } else {
140 Data tmp(0,inData.getDataPointShape(),functionspace,true);
141 // Note: Must use a reference or pointer to a derived object
142 // in order to get polymorphic behaviour. Shouldn't really
143 // be able to create an instance of AbstractDomain but that was done
144 // as a boost::python workaround which may no longer be required.
145 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
146 const_Domain_ptr inDataDomain=inData.getDomain();
147 if (inDataDomain==functionspace.getDomain()) {
148 inDataDomain->interpolateOnDomain(tmp,inData);
149 } else {
150 inDataDomain->interpolateACross(tmp,inData);
151 }
152 m_data=tmp.m_data;
153 }
154 }
155 m_protected=false;
156 }
157
158 Data::Data(DataAbstract* underlyingdata)
159 {
160 // m_data=shared_ptr<DataAbstract>(underlyingdata);
161 m_data=underlyingdata->getPtr();
162 m_protected=false;
163 }
164
165 Data::Data(DataAbstract_ptr underlyingdata)
166 {
167 m_data=underlyingdata;
168 m_protected=false;
169 }
170
171
172 Data::Data(const numeric::array& value,
173 const FunctionSpace& what,
174 bool expanded)
175 {
176 initialise(value,what,expanded);
177 m_protected=false;
178 }
179 /*
180 Data::Data(const DataArrayView& value,
181 const FunctionSpace& what,
182 bool expanded)
183 {
184 initialise(value,what,expanded);
185 m_protected=false;
186 }*/
187
188 Data::Data(const DataTypes::ValueType& value,
189 const DataTypes::ShapeType& shape,
190 const FunctionSpace& what,
191 bool expanded)
192 {
193 initialise(value,shape,what,expanded);
194 m_protected=false;
195 }
196
197
198 Data::Data(const object& value,
199 const FunctionSpace& what,
200 bool expanded)
201 {
202 numeric::array asNumArray(value);
203 initialise(asNumArray,what,expanded);
204 m_protected=false;
205 }
206
207
208 Data::Data(const object& value,
209 const Data& other)
210 {
211 numeric::array asNumArray(value);
212
213 // extract the shape of the numarray
214 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
215 // /* for (int i=0; i < asNumArray.getrank(); i++) {
216 // tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
217 // }*/
218 // // get the space for the data vector
219 // int len = DataTypes::noValues(tempShape);
220 // DataVector temp_data(len, 0.0, len);
221 // /* DataArrayView temp_dataView(temp_data, tempShape);
222 // temp_dataView.copy(asNumArray);*/
223 // temp_data.copyFromNumArray(asNumArray);
224
225 //
226 // Create DataConstant using the given value and all other parameters
227 // copied from other. If value is a rank 0 object this Data
228 // will assume the point data shape of other.
229
230 if (DataTypes::getRank(tempShape)/*temp_dataView.getRank()*/==0) {
231
232
233 // get the space for the data vector
234 int len1 = DataTypes::noValues(tempShape);
235 DataVector temp_data(len1, 0.0, len1);
236 temp_data.copyFromNumArray(asNumArray);
237
238 int len = DataTypes::noValues(other.getDataPointShape());
239
240 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
241 //DataArrayView temp2_dataView(temp2_data, other.getPointDataView().getShape());
242 // initialise(temp2_dataView, other.getFunctionSpace(), false);
243
244 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
245 // boost::shared_ptr<DataAbstract> sp(t);
246 // m_data=sp;
247 m_data=DataAbstract_ptr(t);
248
249 } else {
250 //
251 // Create a DataConstant with the same sample shape as other
252 // initialise(temp_dataView, other.getFunctionSpace(), false);
253 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
254 // boost::shared_ptr<DataAbstract> sp(t);
255 // m_data=sp;
256 m_data=DataAbstract_ptr(t);
257 }
258 m_protected=false;
259 }
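// Illustrative sketch (not part of the original source): using the constructor
// above to build a Data object from a Python scalar, adopting the point shape
// and FunctionSpace of an existing object 'other':
//
//   boost::python::object py_two(2.0);   // rank-0 Python value
//   Data filled(py_two, other);          // every component becomes 2.0; shape and
//                                        // FunctionSpace are copied from 'other'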
260
261 Data::~Data()
262 {
263
264 }
265
266
267
268 void
269 Data::initialise(const boost::python::numeric::array& value,
270 const FunctionSpace& what,
271 bool expanded)
272 {
273 //
274 // Construct a Data object of the appropriate type.
275 // Construct the object first as there seems to be a bug which causes
276 // undefined behaviour if an exception is thrown during construction
277 // within the shared_ptr constructor.
278 if (expanded) {
279 DataAbstract* temp=new DataExpanded(value, what);
280 // boost::shared_ptr<DataAbstract> temp_data(temp);
281 // m_data=temp_data;
282 m_data=temp->getPtr();
283 } else {
284 DataAbstract* temp=new DataConstant(value, what);
285 // boost::shared_ptr<DataAbstract> temp_data(temp);
286 // m_data=temp_data;
287 m_data=temp->getPtr();
288 }
289 }
290
291
292 void
293 Data::initialise(const DataTypes::ValueType& value,
294 const DataTypes::ShapeType& shape,
295 const FunctionSpace& what,
296 bool expanded)
297 {
298 //
299 // Construct a Data object of the appropriate type.
300 // Construct the object first as there seems to be a bug which causes
301 // undefined behaviour if an exception is thrown during construction
302 // within the shared_ptr constructor.
303 if (expanded) {
304 DataAbstract* temp=new DataExpanded(what, shape, value);
305 // boost::shared_ptr<DataAbstract> temp_data(temp);
306 // m_data=temp_data;
307 m_data=temp->getPtr();
308 } else {
309 DataAbstract* temp=new DataConstant(what, shape, value);
310 // boost::shared_ptr<DataAbstract> temp_data(temp);
311 // m_data=temp_data;
312 m_data=temp->getPtr();
313 }
314 }
315
316
317 // void
318 // Data::CompareDebug(const Data& rd)
319 // {
320 // using namespace std;
321 // bool mismatch=false;
322 // std::cout << "Comparing left and right" << endl;
323 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
324 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
325 //
326 // if (left==0)
327 // {
328 // cout << "left arg is not a DataTagged\n";
329 // return;
330 // }
331 //
332 // if (right==0)
333 // {
334 // cout << "right arg is not a DataTagged\n";
335 // return;
336 // }
337 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
338 // cout << "Shapes ";
339 // if (left->getShape()==right->getShape())
340 // {
341 // cout << "ok\n";
342 // }
343 // else
344 // {
345 // cout << "Problem: shapes do not match\n";
346 // mismatch=true;
347 // }
348 // int lim=left->getVector().size();
349 // if (right->getVector().size()) lim=right->getVector().size();
350 // for (int i=0;i<lim;++i)
351 // {
352 // if (left->getVector()[i]!=right->getVector()[i])
353 // {
354 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
355 // mismatch=true;
356 // }
357 // }
358 //
359 // // still need to check the tag map
360 // // also need to watch what is happening to function spaces, are they copied or what?
361 //
362 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
363 // const DataTagged::DataMapType& mapright=right->getTagLookup();
364 //
365 // if (mapleft.size()!=mapright.size())
366 // {
367 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
368 // mismatch=true;
369 // cout << "Left map\n";
370 // DataTagged::DataMapType::const_iterator i,j;
371 // for (i=mapleft.begin();i!=mapleft.end();++i) {
372 // cout << "(" << i->first << "=>" << i->second << ")\n";
373 // }
374 // cout << "Right map\n";
375 // for (i=mapright.begin();i!=mapright.end();++i) {
376 // cout << "(" << i->first << "=>" << i->second << ")\n";
377 // }
378 // cout << "End map\n";
379 //
380 // }
381 //
382 // DataTagged::DataMapType::const_iterator i,j;
383 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
384 // if ((i->first!=j->first) || (i->second!=j->second))
385 // {
386 // cout << "(" << i->first << "=>" << i->second << ")";
387 // cout << ":(" << j->first << "=>" << j->second << ") ";
388 // mismatch=true;
389 // }
390 // }
391 // if (mismatch)
392 // {
393 // cout << "#Mismatch\n";
394 // }
395 // }
396
397 escriptDataC
398 Data::getDataC()
399 {
400 escriptDataC temp;
401 temp.m_dataPtr=(void*)this;
402 return temp;
403 }
404
405 escriptDataC
406 Data::getDataC() const
407 {
408 escriptDataC temp;
409 temp.m_dataPtr=(void*)this;
410 return temp;
411 }
412
413 const boost::python::tuple
414 Data::getShapeTuple() const
415 {
416 const DataTypes::ShapeType& shape=getDataPointShape();
417 switch(getDataPointRank()) {
418 case 0:
419 return make_tuple();
420 case 1:
421 return make_tuple(long_(shape[0]));
422 case 2:
423 return make_tuple(long_(shape[0]),long_(shape[1]));
424 case 3:
425 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
426 case 4:
427 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
428 default:
429 throw DataException("Error - illegal Data rank.");
430 }
431 }
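// Illustrative sketch (not part of the original source): getShapeTuple() mirrors
// the data point shape as a Python tuple. Assuming a FunctionSpace 'fs':
//
//   DataTypes::ShapeType s;
//   s.push_back(3); s.push_back(2);              // point shape (3,2)
//   Data d(0.0, s, fs, false);
//   boost::python::tuple t = d.getShapeTuple();  // the Python tuple (3, 2)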
432
433
434 // The different name is needed because boost has trouble with overloaded functions.
435 // It can't work out what type the function is based solely on its name.
436 // There are ways to fix this involving creating function pointer variables for each form,
437 // but there doesn't seem to be a need, given that the methods have the same name from the Python point of view.
438 Data*
439 Data::copySelf()
440 {
441 DataAbstract* temp=m_data->deepCopy();
442 return new Data(temp);
443 }
444
445 void
446 Data::copy(const Data& other)
447 {
448 DataAbstract* temp=other.m_data->deepCopy();
449 DataAbstract_ptr p=temp->getPtr();
450 m_data=p;
451 }
452
453
454 Data
455 Data::delay()
456 {
457 DataLazy* dl=new DataLazy(m_data);
458 return Data(dl);
459 }
460
461 void
462 Data::delaySelf()
463 {
464 if (!isLazy())
465 {
466 m_data=(new DataLazy(m_data))->getPtr();
467 }
468 }
469
470 void
471 Data::setToZero()
472 {
473 if (isEmpty())
474 {
475 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
476 }
477 m_data->setToZero();
478 }
479
480 void
481 Data::copyWithMask(const Data& other,
482 const Data& mask)
483 {
484 // 1. Interpolate if required so all Data objects use the same FunctionSpace as this one
485 // 2. Tag or Expand so that all Data objects are the same type
486 // 3. Iterate over the data vectors copying values where mask is >0
487 if (other.isEmpty() || mask.isEmpty())
488 {
489 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
490 }
491 Data other2(other);
492 Data mask2(mask);
493 other2.resolve();
494 mask2.resolve();
495 this->resolve();
496 FunctionSpace myFS=getFunctionSpace();
497 FunctionSpace oFS=other2.getFunctionSpace();
498 FunctionSpace mFS=mask2.getFunctionSpace();
499 if (oFS!=myFS)
500 {
501 if (other2.probeInterpolation(myFS))
502 {
503 other2=other2.interpolate(myFS);
504 }
505 else
506 {
507 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
508 }
509 }
510 if (mFS!=myFS)
511 {
512 if (mask2.probeInterpolation(myFS))
513 {
514 mask2=mask2.interpolate(myFS);
515 }
516 else
517 {
518 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
519 }
520 }
521 // Ensure that all args have the same type
522 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
523 {
524 this->expand();
525 other2.expand();
526 mask2.expand();
527 }
528 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
529 {
530 this->tag();
531 other2.tag();
532 mask2.tag();
533 }
534 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
535 {
536 }
537 else
538 {
539 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
540 }
541 // Now we iterate over the elements
542 DataVector& self=getReadyPtr()->getVector();
543 const DataVector& ovec=other2.getReadyPtr()->getVector();
544 const DataVector& mvec=mask2.getReadyPtr()->getVector();
545 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
546 {
547 throw DataException("Error - size mismatch in arguments to copyWithMask.");
548 }
549 size_t num_points=self.size();
550
551 // OPENMP 3.0 allows unsigned loop vars.
552 #if defined(_OPENMP) && (_OPENMP < 200805)
553 long i;
554 #else
555 size_t i;
556 #endif
557 #pragma omp parallel for private(i) schedule(static)
558 for (i=0;i<num_points;++i)
559 {
560 if (mvec[i]>0)
561 {
562 self[i]=ovec[i];
563 }
564 }
565 }
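// Illustrative sketch (not part of the original source): typical use of
// copyWithMask, assuming 'd', 'src' and the mask use compatible FunctionSpaces:
//
//   Data mask = src.wherePositive();   // 1.0 where src > 0, 0.0 elsewhere
//   d.copyWithMask(src, mask);         // d takes src's values wherever mask > 0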
566
567
568
569 bool
570 Data::isExpanded() const
571 {
572 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
573 return (temp!=0);
574 }
575
576 bool
577 Data::isTagged() const
578 {
579 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
580 return (temp!=0);
581 }
582
583 bool
584 Data::isEmpty() const
585 {
586 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
587 return (temp!=0);
588 }
589
590 bool
591 Data::isConstant() const
592 {
593 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
594 return (temp!=0);
595 }
596
597 bool
598 Data::isLazy() const
599 {
600 return m_data->isLazy();
601 }
602
603 // at the moment this is synonymous with !isLazy() but that could change
604 bool
605 Data::isReady() const
606 {
607 return (dynamic_cast<DataReady*>(m_data.get())!=0);
608 }
609
610
611 void
612 Data::setProtection()
613 {
614 m_protected=true;
615 }
616
617 bool
618 Data::isProtected() const
619 {
620 return m_protected;
621 }
622
623
624
625 void
626 Data::expand()
627 {
628 if (isConstant()) {
629 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
630 DataAbstract* temp=new DataExpanded(*tempDataConst);
631 // shared_ptr<DataAbstract> temp_data(temp);
632 // m_data=temp_data;
633 m_data=temp->getPtr();
634 } else if (isTagged()) {
635 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
636 DataAbstract* temp=new DataExpanded(*tempDataTag);
637 // shared_ptr<DataAbstract> temp_data(temp);
638 // m_data=temp_data;
639 m_data=temp->getPtr();
640 } else if (isExpanded()) {
641 //
642 // do nothing
643 } else if (isEmpty()) {
644 throw DataException("Error - Expansion of DataEmpty not possible.");
645 } else if (isLazy()) {
646 resolve();
647 expand(); // resolve might not give us expanded data
648 } else {
649 throw DataException("Error - Expansion not implemented for this Data type.");
650 }
651 }
652
653 void
654 Data::tag()
655 {
656 if (isConstant()) {
657 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
658 DataAbstract* temp=new DataTagged(*tempDataConst);
659 // shared_ptr<DataAbstract> temp_data(temp);
660 // m_data=temp_data;
661 m_data=temp->getPtr();
662 } else if (isTagged()) {
663 // do nothing
664 } else if (isExpanded()) {
665 throw DataException("Error - Creating tag data from DataExpanded not possible.");
666 } else if (isEmpty()) {
667 throw DataException("Error - Creating tag data from DataEmpty not possible.");
668 } else if (isLazy()) {
669 DataAbstract_ptr res=m_data->resolve();
670 if (m_data->isExpanded())
671 {
672 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
673 }
674 m_data=res;
675 tag();
676 } else {
677 throw DataException("Error - Tagging not implemented for this Data type.");
678 }
679 }
680
681 void
682 Data::resolve()
683 {
684 if (isLazy())
685 {
686 m_data=m_data->resolve();
687 }
688 }
689
690
691 Data
692 Data::oneOver() const
693 {
694 if (isLazy())
695 {
696 DataLazy* c=new DataLazy(borrowDataPtr(),RECIP);
697 return Data(c);
698 }
699 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
700 }
701
702 Data
703 Data::wherePositive() const
704 {
705 if (isLazy())
706 {
707 DataLazy* c=new DataLazy(borrowDataPtr(),GZ);
708 return Data(c);
709 }
710 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
711 }
712
713 Data
714 Data::whereNegative() const
715 {
716 if (isLazy())
717 {
718 DataLazy* c=new DataLazy(borrowDataPtr(),LZ);
719 return Data(c);
720 }
721 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
722 }
723
724 Data
725 Data::whereNonNegative() const
726 {
727 if (isLazy())
728 {
729 DataLazy* c=new DataLazy(borrowDataPtr(),GEZ);
730 return Data(c);
731 }
732 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
733 }
734
735 Data
736 Data::whereNonPositive() const
737 {
738 if (isLazy())
739 {
740 DataLazy* c=new DataLazy(borrowDataPtr(),LEZ);
741 return Data(c);
742 }
743 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
744 }
745
746 Data
747 Data::whereZero(double tol) const
748 {
749 Data dataAbs=abs();
750 return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
751 }
752
753 Data
754 Data::whereNonZero(double tol) const
755 {
756 Data dataAbs=abs();
757 return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
758 }
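// Illustrative sketch (not part of the original source): the where* family
// returns component-wise 0/1 masks with the same point shape as the operand:
//
//   Data m1 = d.whereNegative();      // 1.0 where d <  0
//   Data m2 = d.whereZero(1.e-8);     // 1.0 where |d| <= 1.e-8
//   Data m3 = d.whereNonZero(1.e-8);  // complement of m2 for the same tolerance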
759
760 Data
761 Data::interpolate(const FunctionSpace& functionspace) const
762 {
763 return Data(*this,functionspace);
764 }
765
766 bool
767 Data::probeInterpolation(const FunctionSpace& functionspace) const
768 {
769 return getFunctionSpace().probeInterpolation(functionspace);
770 // if (getFunctionSpace()==functionspace) {
771 // return true;
772 // } else {
773 // const_Domain_ptr domain=getDomain();
774 // if (*domain==*functionspace.getDomain()) {
775 // return domain->probeInterpolationOnDomain(getFunctionSpace().getTypeCode(),functionspace.getTypeCode());
776 // } else {
777 // return domain->probeInterpolationACross(getFunctionSpace().getTypeCode(),*(functionspace.getDomain()),functionspace.getTypeCode());
778 // }
779 // }
780 }
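// Illustrative sketch (not part of the original source): guarded interpolation
// onto another FunctionSpace 'target':
//
//   if (d.probeInterpolation(target)) {
//       Data d_on_target = d.interpolate(target);   // uses the Data(Data, FunctionSpace) constructor
//   }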
781
782 Data
783 Data::gradOn(const FunctionSpace& functionspace) const
784 {
785 if (isEmpty())
786 {
787 throw DataException("Error - operation not permitted on instances of DataEmpty.");
788 }
789 double blocktimer_start = blocktimer_time();
790 if (functionspace.getDomain()!=getDomain())
791 throw DataException("Error - gradient cannot be calculated on different domains.");
792 DataTypes::ShapeType grad_shape=getDataPointShape();
793 grad_shape.push_back(functionspace.getDim());
794 Data out(0.0,grad_shape,functionspace,true);
795 getDomain()->setToGradient(out,*this);
796 blocktimer_increment("grad()", blocktimer_start);
797 return out;
798 }
799
800 Data
801 Data::grad() const
802 {
803 if (isEmpty())
804 {
805 throw DataException("Error - operation not permitted on instances of DataEmpty.");
806 }
807 return gradOn(escript::function(*getDomain()));
808 }
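// Illustrative sketch (not part of the original source): the gradient appends
// the spatial dimension to the point shape, so a scalar Data on a 3D domain
// yields point shape (3,):
//
//   Data g = d.grad();   // equivalent to d.gradOn(escript::function(*d.getDomain()))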
809
810 int
811 Data::getDataPointSize() const
812 {
813 return m_data->getNoValues();
814 }
815
816 DataTypes::ValueType::size_type
817 Data::getLength() const
818 {
819 return m_data->getLength();
820 }
821
822 const
823 boost::python::numeric::array
824 Data::getValueOfDataPoint(int dataPointNo)
825 {
826 int i, j, k, l;
827
828 FORCERESOLVE;
829
830 //
831 // determine the rank and shape of each data point
832 int dataPointRank = getDataPointRank();
833 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
834
835 //
836 // create the numeric array to be returned
837 boost::python::numeric::array numArray(0.0);
838
839 //
840 // the shape of the returned numeric array will be the same
841 // as that of the data point
842 int arrayRank = dataPointRank;
843 const DataTypes::ShapeType& arrayShape = dataPointShape;
844
845 //
846 // resize the numeric array to the shape just calculated
847 if (arrayRank==0) {
848 numArray.resize(1);
849 }
850 if (arrayRank==1) {
851 numArray.resize(arrayShape[0]);
852 }
853 if (arrayRank==2) {
854 numArray.resize(arrayShape[0],arrayShape[1]);
855 }
856 if (arrayRank==3) {
857 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
858 }
859 if (arrayRank==4) {
860 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
861 }
862
863 if (getNumDataPointsPerSample()>0) {
864 int sampleNo = dataPointNo/getNumDataPointsPerSample();
865 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
866 //
867 // Check a valid sample number has been supplied
868 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
869 throw DataException("Error - Data::getValueOfDataPoint: invalid sampleNo.");
870 }
871
872 //
873 // Check a valid data point number has been supplied
874 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
875 throw DataException("Error - Data::getValueOfDataPoint: invalid dataPointNoInSample.");
876 }
877 // TODO: global error handling
878 // create a view of the data if it is stored locally
879 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
880 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
881
882
883 switch( dataPointRank ){
884 case 0 :
885 numArray[0] = getDataAtOffset(offset);
886 break;
887 case 1 :
888 for( i=0; i<dataPointShape[0]; i++ )
889 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
890 break;
891 case 2 :
892 for( i=0; i<dataPointShape[0]; i++ )
893 for( j=0; j<dataPointShape[1]; j++)
894 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
895 break;
896 case 3 :
897 for( i=0; i<dataPointShape[0]; i++ )
898 for( j=0; j<dataPointShape[1]; j++ )
899 for( k=0; k<dataPointShape[2]; k++)
900 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
901 break;
902 case 4 :
903 for( i=0; i<dataPointShape[0]; i++ )
904 for( j=0; j<dataPointShape[1]; j++ )
905 for( k=0; k<dataPointShape[2]; k++ )
906 for( l=0; l<dataPointShape[3]; l++)
907 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
908 break;
909 }
910 }
911 //
912 // return the array
913 return numArray;
914
915 }
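// Illustrative sketch (not part of the original source): reading a single data
// point back as a numeric array (lazy data is resolved first by FORCERESOLVE):
//
//   boost::python::numeric::array a = d.getValueOfDataPoint(0);
//   // 'a' has the same rank and shape as one data point of 'd'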
916
917 void
918 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
919 {
920 // this will throw if the value cannot be represented
921 boost::python::numeric::array num_array(py_object);
922 setValueOfDataPointToArray(dataPointNo,num_array);
923 }
924
925 void
926 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
927 {
928 if (isProtected()) {
929 throw DataException("Error - attempt to update protected Data object.");
930 }
931 FORCERESOLVE;
932 //
933 // check rank
934 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
935 throw DataException("Rank of numarray does not match Data object rank");
936
937 //
938 // check shape of num_array
939 for (unsigned int i=0; i<getDataPointRank(); i++) {
940 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
941 throw DataException("Shape of numarray does not match Data object shape");
942 }
943 //
944 // make sure data is expanded:
945 //
946 if (!isExpanded()) {
947 expand();
948 }
949 if (getNumDataPointsPerSample()>0) {
950 int sampleNo = dataPointNo/getNumDataPointsPerSample();
951 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
952 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
953 } else {
954 m_data->copyToDataPoint(-1, 0,num_array);
955 }
956 }
957
958 void
959 Data::setValueOfDataPoint(int dataPointNo, const double value)
960 {
961 if (isProtected()) {
962 throw DataException("Error - attempt to update protected Data object.");
963 }
964 //
965 // make sure data is expanded:
966 FORCERESOLVE;
967 if (!isExpanded()) {
968 expand();
969 }
970 if (getNumDataPointsPerSample()>0) {
971 int sampleNo = dataPointNo/getNumDataPointsPerSample();
972 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
973 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
974 } else {
975 m_data->copyToDataPoint(-1, 0,value);
976 }
977 }
978
979 const
980 boost::python::numeric::array
981 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
982 {
983 size_t length=0;
984 int i, j, k, l, pos;
985 FORCERESOLVE;
986 //
987 // determine the rank and shape of each data point
988 int dataPointRank = getDataPointRank();
989 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
990
991 //
992 // create the numeric array to be returned
993 boost::python::numeric::array numArray(0.0);
994
995 //
996 // the shape of the returned numeric array will be the same
997 // as that of the data point
998 int arrayRank = dataPointRank;
999 const DataTypes::ShapeType& arrayShape = dataPointShape;
1000
1001 //
1002 // resize the numeric array to the shape just calculated
1003 if (arrayRank==0) {
1004 numArray.resize(1);
1005 }
1006 if (arrayRank==1) {
1007 numArray.resize(arrayShape[0]);
1008 }
1009 if (arrayRank==2) {
1010 numArray.resize(arrayShape[0],arrayShape[1]);
1011 }
1012 if (arrayRank==3) {
1013 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
1014 }
1015 if (arrayRank==4) {
1016 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
1017 }
1018
1019 // added for the MPI communication
1020 length=1;
1021 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1022 double *tmpData = new double[length];
1023
1024 //
1025 // load the values for the data point into the numeric array.
1026
1027 // updated for the MPI case
1028 if( get_MPIRank()==procNo ){
1029 if (getNumDataPointsPerSample()>0) {
1030 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1031 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1032 //
1033 // Check a valid sample number has been supplied
1034 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1035 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid sampleNo.");
1036 }
1037
1038 //
1039 // Check a valid data point number has been supplied
1040 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1041 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid dataPointNoInSample.");
1042 }
1043 // TODO: global error handling
1044 // create a view of the data if it is stored locally
1045 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1046 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1047
1048 // pack the data from the view into tmpData for MPI communication
1049 pos=0;
1050 switch( dataPointRank ){
1051 case 0 :
1052 tmpData[0] = getDataAtOffset(offset);
1053 break;
1054 case 1 :
1055 for( i=0; i<dataPointShape[0]; i++ )
1056 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1057 break;
1058 case 2 :
1059 for( i=0; i<dataPointShape[0]; i++ )
1060 for( j=0; j<dataPointShape[1]; j++, pos++ )
1061 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1062 break;
1063 case 3 :
1064 for( i=0; i<dataPointShape[0]; i++ )
1065 for( j=0; j<dataPointShape[1]; j++ )
1066 for( k=0; k<dataPointShape[2]; k++, pos++ )
1067 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1068 break;
1069 case 4 :
1070 for( i=0; i<dataPointShape[0]; i++ )
1071 for( j=0; j<dataPointShape[1]; j++ )
1072 for( k=0; k<dataPointShape[2]; k++ )
1073 for( l=0; l<dataPointShape[3]; l++, pos++ )
1074 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1075 break;
1076 }
1077 }
1078 }
1079 #ifdef PASO_MPI
1080 // broadcast the data to all other processes
1081 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1082 #endif
1083
1084 // unpack the data
1085 switch( dataPointRank ){
1086 case 0 :
1087 numArray[0]=tmpData[0];
1088 break;
1089 case 1 :
1090 for( i=0; i<dataPointShape[0]; i++ )
1091 numArray[i]=tmpData[i];
1092 break;
1093 case 2 :
1094 for( i=0; i<dataPointShape[0]; i++ )
1095 for( j=0; j<dataPointShape[1]; j++ )
1096 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1097 break;
1098 case 3 :
1099 for( i=0; i<dataPointShape[0]; i++ )
1100 for( j=0; j<dataPointShape[1]; j++ )
1101 for( k=0; k<dataPointShape[2]; k++ )
1102 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1103 break;
1104 case 4 :
1105 for( i=0; i<dataPointShape[0]; i++ )
1106 for( j=0; j<dataPointShape[1]; j++ )
1107 for( k=0; k<dataPointShape[2]; k++ )
1108 for( l=0; l<dataPointShape[3]; l++ )
1109 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1110 break;
1111 }
1112
1113 delete [] tmpData;
1114 //
1115 // return the loaded array
1116 return numArray;
1117 }
1118
1119
1120 boost::python::numeric::array
1121 Data::integrate_const() const
1122 {
1123 if (isLazy())
1124 {
1125 throw DataException("Error - cannot integrate for constant lazy data.");
1126 }
1127 return integrateWorker();
1128 }
1129
1130 boost::python::numeric::array
1131 Data::integrate()
1132 {
1133 if (isLazy())
1134 {
1135 expand();
1136 }
1137 return integrateWorker();
1138 }
1139
1140
1141
1142 boost::python::numeric::array
1143 Data::integrateWorker() const
1144 {
1145 int index;
1146 int rank = getDataPointRank();
1147 DataTypes::ShapeType shape = getDataPointShape();
1148 int dataPointSize = getDataPointSize();
1149
1150 //
1151 // calculate the integral values
1152 vector<double> integrals(dataPointSize);
1153 vector<double> integrals_local(dataPointSize);
1154 #ifdef PASO_MPI
1155 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals_local,*this);
1156 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1157 double *tmp = new double[dataPointSize];
1158 double *tmp_local = new double[dataPointSize];
1159 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1160 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1161 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1162 delete[] tmp;
1163 delete[] tmp_local;
1164 #else
1165 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals,*this);
1166 #endif
1167
1168 //
1169 // create the numeric array to be returned
1170 // and load the array with the integral values
1171 boost::python::numeric::array bp_array(1.0);
1172 if (rank==0) {
1173 bp_array.resize(1);
1174 index = 0;
1175 bp_array[0] = integrals[index];
1176 }
1177 if (rank==1) {
1178 bp_array.resize(shape[0]);
1179 for (int i=0; i<shape[0]; i++) {
1180 index = i;
1181 bp_array[i] = integrals[index];
1182 }
1183 }
1184 if (rank==2) {
1185 bp_array.resize(shape[0],shape[1]);
1186 for (int i=0; i<shape[0]; i++) {
1187 for (int j=0; j<shape[1]; j++) {
1188 index = i + shape[0] * j;
1189 bp_array[make_tuple(i,j)] = integrals[index];
1190 }
1191 }
1192 }
1193 if (rank==3) {
1194 bp_array.resize(shape[0],shape[1],shape[2]);
1195 for (int i=0; i<shape[0]; i++) {
1196 for (int j=0; j<shape[1]; j++) {
1197 for (int k=0; k<shape[2]; k++) {
1198 index = i + shape[0] * ( j + shape[1] * k );
1199 bp_array[make_tuple(i,j,k)] = integrals[index];
1200 }
1201 }
1202 }
1203 }
1204 if (rank==4) {
1205 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1206 for (int i=0; i<shape[0]; i++) {
1207 for (int j=0; j<shape[1]; j++) {
1208 for (int k=0; k<shape[2]; k++) {
1209 for (int l=0; l<shape[3]; l++) {
1210 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1211 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1212 }
1213 }
1214 }
1215 }
1216 }
1217
1218 //
1219 // return the loaded array
1220 return bp_array;
1221 }
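// Illustrative sketch (not part of the original source): integrate() returns a
// numeric array shaped like one data point, with each component integrated over
// the domain (and summed across ranks via MPI_Allreduce when PASO_MPI is defined):
//
//   boost::python::numeric::array totals = d.integrate();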
1222
1223 Data
1224 Data::sin() const
1225 {
1226 if (isLazy())
1227 {
1228 DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
1229 return Data(c);
1230 }
1231 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1232 }
1233
1234 Data
1235 Data::cos() const
1236 {
1237 if (isLazy())
1238 {
1239 DataLazy* c=new DataLazy(borrowDataPtr(),COS);
1240 return Data(c);
1241 }
1242 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1243 }
1244
1245 Data
1246 Data::tan() const
1247 {
1248 if (isLazy())
1249 {
1250 DataLazy* c=new DataLazy(borrowDataPtr(),TAN);
1251 return Data(c);
1252 }
1253 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1254 }
1255
1256 Data
1257 Data::asin() const
1258 {
1259 if (isLazy())
1260 {
1261 DataLazy* c=new DataLazy(borrowDataPtr(),ASIN);
1262 return Data(c);
1263 }
1264 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1265 }
1266
1267 Data
1268 Data::acos() const
1269 {
1270 if (isLazy())
1271 {
1272 DataLazy* c=new DataLazy(borrowDataPtr(),ACOS);
1273 return Data(c);
1274 }
1275 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1276 }
1277
1278
1279 Data
1280 Data::atan() const
1281 {
1282 if (isLazy())
1283 {
1284 DataLazy* c=new DataLazy(borrowDataPtr(),ATAN);
1285 return Data(c);
1286 }
1287 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1288 }
1289
1290 Data
1291 Data::sinh() const
1292 {
1293 if (isLazy())
1294 {
1295 DataLazy* c=new DataLazy(borrowDataPtr(),SINH);
1296 return Data(c);
1297 }
1298 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1299 }
1300
1301 Data
1302 Data::cosh() const
1303 {
1304 if (isLazy())
1305 {
1306 DataLazy* c=new DataLazy(borrowDataPtr(),COSH);
1307 return Data(c);
1308 }
1309 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1310 }
1311
1312 Data
1313 Data::tanh() const
1314 {
1315 if (isLazy())
1316 {
1317 DataLazy* c=new DataLazy(borrowDataPtr(),TANH);
1318 return Data(c);
1319 }
1320 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1321 }
1322
1323
1324 Data
1325 Data::erf() const
1326 {
1327 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1328 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1329 #else
1330 if (isLazy())
1331 {
1332 DataLazy* c=new DataLazy(borrowDataPtr(),ERF);
1333 return Data(c);
1334 }
1335 return C_TensorUnaryOperation(*this, ::erf);
1336 #endif
1337 }
1338
1339 Data
1340 Data::asinh() const
1341 {
1342 if (isLazy())
1343 {
1344 DataLazy* c=new DataLazy(borrowDataPtr(),ASINH);
1345 return Data(c);
1346 }
1347 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1348 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1349 #else
1350 return C_TensorUnaryOperation(*this, ::asinh);
1351 #endif
1352 }
1353
1354 Data
1355 Data::acosh() const
1356 {
1357 if (isLazy())
1358 {
1359 DataLazy* c=new DataLazy(borrowDataPtr(),ACOSH);
1360 return Data(c);
1361 }
1362 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1363 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1364 #else
1365 return C_TensorUnaryOperation(*this, ::acosh);
1366 #endif
1367 }
1368
1369 Data
1370 Data::atanh() const
1371 {
1372 if (isLazy())
1373 {
1374 DataLazy* c=new DataLazy(borrowDataPtr(),ATANH);
1375 return Data(c);
1376 }
1377 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1378 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1379 #else
1380 return C_TensorUnaryOperation(*this, ::atanh);
1381 #endif
1382 }
1383
1384 Data
1385 Data::log10() const
1386 { if (isLazy())
1387 {
1388 DataLazy* c=new DataLazy(borrowDataPtr(),LOG10);
1389 return Data(c);
1390 }
1391 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1392 }
1393
1394 Data
1395 Data::log() const
1396 {
1397 if (isLazy())
1398 {
1399 DataLazy* c=new DataLazy(borrowDataPtr(),LOG);
1400 return Data(c);
1401 }
1402 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1403 }
1404
1405 Data
1406 Data::sign() const
1407 {
1408 if (isLazy())
1409 {
1410 DataLazy* c=new DataLazy(borrowDataPtr(),SIGN);
1411 return Data(c);
1412 }
1413 return C_TensorUnaryOperation(*this, escript::fsign);
1414 }
1415
1416 Data
1417 Data::abs() const
1418 {
1419 if (isLazy())
1420 {
1421 DataLazy* c=new DataLazy(borrowDataPtr(),ABS);
1422 return Data(c);
1423 }
1424 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1425 }
1426
1427 Data
1428 Data::neg() const
1429 {
1430 if (isLazy())
1431 {
1432 DataLazy* c=new DataLazy(borrowDataPtr(),NEG);
1433 return Data(c);
1434 }
1435 return C_TensorUnaryOperation(*this, negate<double>());
1436 }
1437
1438 Data
1439 Data::pos() const
1440 {
1441 // Not doing a lazy check here is deliberate:
1442 // since a deep copy of lazy data should be cheap, we just let it happen now.
1443 Data result;
1444 // perform a deep copy
1445 result.copy(*this);
1446 return result;
1447 }
1448
1449 Data
1450 Data::exp() const
1451 {
1452 if (isLazy())
1453 {
1454 DataLazy* c=new DataLazy(borrowDataPtr(),EXP);
1455 return Data(c);
1456 }
1457 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1458 }
1459
1460 Data
1461 Data::sqrt() const
1462 {
1463 if (isLazy())
1464 {
1465 DataLazy* c=new DataLazy(borrowDataPtr(),SQRT);
1466 return Data(c);
1467 }
1468 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1469 }
1470
1471 double
1472 Data::Lsup_const() const
1473 {
1474 if (isLazy())
1475 {
1476 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1477 }
1478 return LsupWorker();
1479 }
1480
1481 double
1482 Data::Lsup()
1483 {
1484 if (isLazy())
1485 {
1486 resolve();
1487 }
1488 return LsupWorker();
1489 }
1490
1491 double
1492 Data::sup_const() const
1493 {
1494 if (isLazy())
1495 {
1496 throw DataException("Error - cannot compute sup for constant lazy data.");
1497 }
1498 return supWorker();
1499 }
1500
1501 double
1502 Data::sup()
1503 {
1504 if (isLazy())
1505 {
1506 resolve();
1507 }
1508 return supWorker();
1509 }
1510
1511 double
1512 Data::inf_const() const
1513 {
1514 if (isLazy())
1515 {
1516 throw DataException("Error - cannot compute inf for constant lazy data.");
1517 }
1518 return infWorker();
1519 }
1520
1521 double
1522 Data::inf()
1523 {
1524 if (isLazy())
1525 {
1526 resolve();
1527 }
1528 return infWorker();
1529 }
1530
1531 double
1532 Data::LsupWorker() const
1533 {
1534 double localValue;
1535 //
1536 // set the initial absolute maximum value to zero
1537
1538 AbsMax abs_max_func;
1539 localValue = algorithm(abs_max_func,0);
1540 #ifdef PASO_MPI
1541 double globalValue;
1542 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1543 return globalValue;
1544 #else
1545 return localValue;
1546 #endif
1547 }
1548
1549 double
1550 Data::supWorker() const
1551 {
1552 double localValue;
1553 //
1554 // set the initial maximum value to min possible double
1555 FMax fmax_func;
1556 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1557 #ifdef PASO_MPI
1558 double globalValue;
1559 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1560 return globalValue;
1561 #else
1562 return localValue;
1563 #endif
1564 }
1565
1566 double
1567 Data::infWorker() const
1568 {
1569 double localValue;
1570 //
1571 // set the initial minimum value to max possible double
1572 FMin fmin_func;
1573 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1574 #ifdef PASO_MPI
1575 double globalValue;
1576 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1577 return globalValue;
1578 #else
1579 return localValue;
1580 #endif
1581 }
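// Illustrative sketch (not part of the original source): the workers reduce
// locally via algorithm() and then across MPI ranks when PASO_MPI is defined:
//
//   double m  = d.Lsup();   // maximum of |value| over all components and points
//   double hi = d.sup();    // plain maximum
//   double lo = d.inf();    // plain minimum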
1582
1583 /* TODO */
1584 /* global reduction */
1585 Data
1586 Data::maxval() const
1587 {
1588 if (isLazy())
1589 {
1590 Data temp(*this); // to get around the fact that you can't resolve a const Data
1591 temp.resolve();
1592 return temp.maxval();
1593 }
1594 //
1595 // set the initial maximum value to min possible double
1596 FMax fmax_func;
1597 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1598 }
1599
1600 Data
1601 Data::minval() const
1602 {
1603 if (isLazy())
1604 {
1605 Data temp(*this); // to get around the fact that you can't resolve a const Data
1606 temp.resolve();
1607 return temp.minval();
1608 }
1609 //
1610 // set the initial minimum value to max possible double
1611 FMin fmin_func;
1612 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1613 }
1614
1615 Data
1616 Data::swapaxes(const int axis0, const int axis1) const
1617 {
1618 if (isLazy())
1619 {
1620 Data temp(*this);
1621 temp.resolve();
1622 return temp.swapaxes(axis0,axis1);
1623 }
1624 int axis0_tmp,axis1_tmp;
1625 DataTypes::ShapeType s=getDataPointShape();
1626 DataTypes::ShapeType ev_shape;
1627 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset],
1628 // which goes through all shape vector elements starting with axis_offset (wrapping around to 0 when the index reaches rank)
1629 int rank=getDataPointRank();
1630 if (rank<2) {
1631 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1632 }
1633 if (axis0<0 || axis0>rank-1) {
1634 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
1635 }
1636 if (axis1<0 || axis1>rank-1) {
1637 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1638 }
1639 if (axis0 == axis1) {
1640 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1641 }
1642 if (axis0 > axis1) {
1643 axis0_tmp=axis1;
1644 axis1_tmp=axis0;
1645 } else {
1646 axis0_tmp=axis0;
1647 axis1_tmp=axis1;
1648 }
1649 for (int i=0; i<rank; i++) {
1650 if (i == axis0_tmp) {
1651 ev_shape.push_back(s[axis1_tmp]);
1652 } else if (i == axis1_tmp) {
1653 ev_shape.push_back(s[axis0_tmp]);
1654 } else {
1655 ev_shape.push_back(s[i]);
1656 }
1657 }
1658 Data ev(0.,ev_shape,getFunctionSpace());
1659 ev.typeMatchRight(*this);
1660 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1661 return ev;
1662
1663 }
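// Illustrative sketch (not part of the original source): for rank-2 data,
// swapping the two axes transposes each data point:
//
//   Data dt = d.swapaxes(0, 1);   // point shape (m,n) becomes (n,m)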
1664
1665 Data
1666 Data::symmetric() const
1667 {
1668 // check input
1669 DataTypes::ShapeType s=getDataPointShape();
1670 if (getDataPointRank()==2) {
1671 if(s[0] != s[1])
1672 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1673 }
1674 else if (getDataPointRank()==4) {
1675 if(!(s[0] == s[2] && s[1] == s[3]))
1676 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1677 }
1678 else {
1679 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1680 }
1681 if (isLazy())
1682 {
1683 DataLazy* c=new DataLazy(borrowDataPtr(),SYM);
1684 return Data(c);
1685 }
1686 Data ev(0.,getDataPointShape(),getFunctionSpace());
1687 ev.typeMatchRight(*this);
1688 m_data->symmetric(ev.m_data.get());
1689 return ev;
1690 }
1691
1692 Data
1693 Data::nonsymmetric() const
1694 {
1695 if (isLazy())
1696 {
1697 DataLazy* c=new DataLazy(borrowDataPtr(),NSYM);
1698 return Data(c);
1699 }
1700 // check input
1701 DataTypes::ShapeType s=getDataPointShape();
1702 if (getDataPointRank()==2) {
1703 if(s[0] != s[1])
1704 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1705 DataTypes::ShapeType ev_shape;
1706 ev_shape.push_back(s[0]);
1707 ev_shape.push_back(s[1]);
1708 Data ev(0.,ev_shape,getFunctionSpace());
1709 ev.typeMatchRight(*this);
1710 m_data->nonsymmetric(ev.m_data.get());
1711 return ev;
1712 }
1713 else if (getDataPointRank()==4) {
1714 if(!(s[0] == s[2] && s[1] == s[3]))
1715 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1716 DataTypes::ShapeType ev_shape;
1717 ev_shape.push_back(s[0]);
1718 ev_shape.push_back(s[1]);
1719 ev_shape.push_back(s[2]);
1720 ev_shape.push_back(s[3]);
1721 Data ev(0.,ev_shape,getFunctionSpace());
1722 ev.typeMatchRight(*this);
1723 m_data->nonsymmetric(ev.m_data.get());
1724 return ev;
1725 }
1726 else {
1727 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1728 }
1729 }
1730
1731 Data
1732 Data::trace(int axis_offset) const
1733 {
1734 if (isLazy())
1735 {
1736 DataLazy* c=new DataLazy(borrowDataPtr(),TRACE,axis_offset);
1737 return Data(c);
1738 }
1739 DataTypes::ShapeType s=getDataPointShape();
1740 if (getDataPointRank()==2) {
1741 DataTypes::ShapeType ev_shape;
1742 Data ev(0.,ev_shape,getFunctionSpace());
1743 ev.typeMatchRight(*this);
1744 m_data->trace(ev.m_data.get(), axis_offset);
1745 return ev;
1746 }
1747 if (getDataPointRank()==3) {
1748 DataTypes::ShapeType ev_shape;
1749 if (axis_offset==0) {
1750 int s2=s[2];
1751 ev_shape.push_back(s2);
1752 }
1753 else if (axis_offset==1) {
1754 int s0=s[0];
1755 ev_shape.push_back(s0);
1756 }
1757 Data ev(0.,ev_shape,getFunctionSpace());
1758 ev.typeMatchRight(*this);
1759 m_data->trace(ev.m_data.get(), axis_offset);
1760 return ev;
1761 }
1762 if (getDataPointRank()==4) {
1763 DataTypes::ShapeType ev_shape;
1764 if (axis_offset==0) {
1765 ev_shape.push_back(s[2]);
1766 ev_shape.push_back(s[3]);
1767 }
1768 else if (axis_offset==1) {
1769 ev_shape.push_back(s[0]);
1770 ev_shape.push_back(s[3]);
1771 }
1772 else if (axis_offset==2) {
1773 ev_shape.push_back(s[0]);
1774 ev_shape.push_back(s[1]);
1775 }
1776 Data ev(0.,ev_shape,getFunctionSpace());
1777 ev.typeMatchRight(*this);
1778 m_data->trace(ev.m_data.get(), axis_offset);
1779 return ev;
1780 }
1781 else {
1782 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1783 }
1784 }
1785
1786 Data
1787 Data::transpose(int axis_offset) const
1788 {
1789 if (isLazy())
1790 {
1791 DataLazy* c=new DataLazy(borrowDataPtr(),TRANS,axis_offset);
1792 return Data(c);
1793 }
1794 DataTypes::ShapeType s=getDataPointShape();
1795 DataTypes::ShapeType ev_shape;
1796 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset],
1797 // which goes through all shape vector elements starting with axis_offset (wrapping around to 0 when the index reaches rank)
1798 int rank=getDataPointRank();
1799 if (axis_offset<0 || axis_offset>rank) {
1800 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1801 }
1802 for (int i=0; i<rank; i++) {
1803 int index = (axis_offset+i)%rank;
1804 ev_shape.push_back(s[index]); // Append to new shape
1805 }
1806 Data ev(0.,ev_shape,getFunctionSpace());
1807 ev.typeMatchRight(*this);
1808 m_data->transpose(ev.m_data.get(), axis_offset);
1809 return ev;
1810 }
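// Illustrative sketch (not part of the original source): transpose(axis_offset)
// cycles the point shape, s_out = s[axis_offset:] + s[:axis_offset]; e.g. for a
// rank-3 point shape (a,b,c):
//
//   Data t1 = d.transpose(1);   // point shape becomes (b,c,a)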
1811
1812 Data
1813 Data::eigenvalues() const
1814 {
1815 if (isLazy())
1816 {
1817 Data temp(*this); // to get around the fact that you can't resolve a const Data
1818 temp.resolve();
1819 return temp.eigenvalues();
1820 }
1821 // check input
1822 DataTypes::ShapeType s=getDataPointShape();
1823 if (getDataPointRank()!=2)
1824 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1825 if(s[0] != s[1])
1826 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1827 // create return
1828 DataTypes::ShapeType ev_shape(1,s[0]);
1829 Data ev(0.,ev_shape,getFunctionSpace());
1830 ev.typeMatchRight(*this);
1831 m_data->eigenvalues(ev.m_data.get());
1832 return ev;
1833 }
1834
1835 const boost::python::tuple
1836 Data::eigenvalues_and_eigenvectors(const double tol) const
1837 {
1838 if (isLazy())
1839 {
1840 Data temp(*this); // to get around the fact that you can't resolve a const Data
1841 temp.resolve();
1842 return temp.eigenvalues_and_eigenvectors(tol);
1843 }
1844 DataTypes::ShapeType s=getDataPointShape();
1845 if (getDataPointRank()!=2)
1846 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1847 if(s[0] != s[1])
1848 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1849 // create return
1850 DataTypes::ShapeType ev_shape(1,s[0]);
1851 Data ev(0.,ev_shape,getFunctionSpace());
1852 ev.typeMatchRight(*this);
1853 DataTypes::ShapeType V_shape(2,s[0]);
1854 Data V(0.,V_shape,getFunctionSpace());
1855 V.typeMatchRight(*this);
1856 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1857 return make_tuple(boost::python::object(ev),boost::python::object(V));
1858 }
1859
1860 const boost::python::tuple
1861 Data::minGlobalDataPoint() const
1862 {
1863 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() because boost::make_tuple
1864 // causes an abort (for unknown reasons) if there are OpenMP directives in the
1865 // surrounding function.
1866
1867 int DataPointNo;
1868 int ProcNo;
1869 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1870 return make_tuple(ProcNo,DataPointNo);
1871 }
1872
1873 void
1874 Data::calc_minGlobalDataPoint(int& ProcNo,
1875 int& DataPointNo) const
1876 {
1877 if (isLazy())
1878 {
1879 Data temp(*this); // to get around the fact that you can't resolve a const Data
1880 temp.resolve();
1881 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1882 }
1883 int i,j;
1884 int lowi=0,lowj=0;
1885 double min=numeric_limits<double>::max();
1886
1887 Data temp=minval();
1888
1889 int numSamples=temp.getNumSamples();
1890 int numDPPSample=temp.getNumDataPointsPerSample();
1891
1892 double next,local_min;
1893 int local_lowi=0,local_lowj=0;
1894
1895 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1896 {
1897 local_min=min;
1898 #pragma omp for private(i,j) schedule(static)
1899 for (i=0; i<numSamples; i++) {
1900 for (j=0; j<numDPPSample; j++) {
1901 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1902 if (next<local_min) {
1903 local_min=next;
1904 local_lowi=i;
1905 local_lowj=j;
1906 }
1907 }
1908 }
1909 #pragma omp critical
1910 if (local_min<min) {
1911 min=local_min;
1912 lowi=local_lowi;
1913 lowj=local_lowj;
1914 }
1915 }
1916
1917 #ifdef PASO_MPI
1918 // determine the processor on which the minimum occurs
1919 next = temp.getDataPoint(lowi,lowj);
1920 int lowProc = 0;
1921 double *globalMins = new double[get_MPISize()+1];
1922 int error;
1923 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1924
1925 if( get_MPIRank()==0 ){
1926 next = globalMins[lowProc];
1927 for( i=1; i<get_MPISize(); i++ )
1928 if( next>globalMins[i] ){
1929 lowProc = i;
1930 next = globalMins[i];
1931 }
1932 }
1933 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );
1934
1935 delete [] globalMins;
1936 ProcNo = lowProc;
1937 #else
1938 ProcNo = 0;
1939 #endif
1940 DataPointNo = lowj + lowi * numDPPSample;
1941 }
1942
1943 void
1944 Data::saveDX(std::string fileName) const
1945 {
1946 if (isEmpty())
1947 {
1948 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1949 }
1950 if (isLazy())
1951 {
1952 Data temp(*this); // to get around the fact that you can't resolve a const Data
1953 temp.resolve();
1954 temp.saveDX(fileName);
1955 return;
1956 }
1957 boost::python::dict args;
1958 args["data"]=boost::python::object(this);
1959 getDomain()->saveDX(fileName,args);
1960 return;
1961 }
1962
1963 void
1964 Data::saveVTK(std::string fileName) const
1965 {
1966 if (isEmpty())
1967 {
1968 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1969 }
1970 if (isLazy())
1971 {
1972 Data temp(*this); // to get around the fact that you can't resolve a const Data
1973 temp.resolve();
1974 temp.saveVTK(fileName);
1975 return;
1976 }
1977 boost::python::dict args;
1978 args["data"]=boost::python::object(this);
1979 getDomain()->saveVTK(fileName,args);
1980 return;
1981 }
1982
1983 Data&
1984 Data::operator+=(const Data& right)
1985 {
1986 if (isProtected()) {
1987 throw DataException("Error - attempt to update protected Data object.");
1988 }
1989 if (isLazy() || right.isLazy())
1990 {
1991 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
1992 m_data=c->getPtr();
1993 return (*this);
1994 }
1995 else
1996 {
1997 binaryOp(right,plus<double>());
1998 return (*this);
1999 }
2000 }
2001
2002 Data&
2003 Data::operator+=(const boost::python::object& right)
2004 {
2005 if (isProtected()) {
2006 throw DataException("Error - attempt to update protected Data object.");
2007 }
2008 Data tmp(right,getFunctionSpace(),false);
2009 if (isLazy())
2010 {
2011 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
2012 m_data=c->getPtr();
2013 return (*this);
2014 }
2015 else
2016 {
2017 binaryOp(tmp,plus<double>());
2018 return (*this);
2019 }
2020 }
2021
2022 // Hmmm, operator= makes a deep copy but the copy constructor does not?
2023 Data&
2024 Data::operator=(const Data& other)
2025 {
2026 copy(other);
2027 return (*this);
2028 }
2029
2030 Data&
2031 Data::operator-=(const Data& right)
2032 {
2033 if (isProtected()) {
2034 throw DataException("Error - attempt to update protected Data object.");
2035 }
2036 if (isLazy() || right.isLazy())
2037 {
2038 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2039 m_data=c->getPtr();
2040 return (*this);
2041 }
2042 else
2043 {
2044 binaryOp(right,minus<double>());
2045 return (*this);
2046 }
2047 }
2048
2049 Data&
2050 Data::operator-=(const boost::python::object& right)
2051 {
2052 if (isProtected()) {
2053 throw DataException("Error - attempt to update protected Data object.");
2054 }
2055 Data tmp(right,getFunctionSpace(),false);
2056 if (isLazy())
2057 {
2058 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2059 m_data=c->getPtr();
2060 return (*this);
2061 }
2062 else
2063 {
2064 binaryOp(tmp,minus<double>());
2065 return (*this);
2066 }
2067 }
2068
2069 Data&
2070 Data::operator*=(const Data& right)
2071 {
2072 if (isProtected()) {
2073 throw DataException("Error - attempt to update protected Data object.");
2074 }
2075 if (isLazy() || right.isLazy())
2076 {
2077 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2078 m_data=c->getPtr();
2079 return (*this);
2080 }
2081 else
2082 {
2083 binaryOp(right,multiplies<double>());
2084 return (*this);
2085 }
2086 }
2087
2088 Data&
2089 Data::operator*=(const boost::python::object& right)
2090 {
2091 if (isProtected()) {
2092 throw DataException("Error - attempt to update protected Data object.");
2093 }
2094 Data tmp(right,getFunctionSpace(),false);
2095 if (isLazy())
2096 {
2097 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2098 m_data=c->getPtr();
2099 return (*this);
2100 }
2101 else
2102 {
2103 binaryOp(tmp,multiplies<double>());
2104 return (*this);
2105 }
2106 }
2107
2108 Data&
2109 Data::operator/=(const Data& right)
2110 {
2111 if (isProtected()) {
2112 throw DataException("Error - attempt to update protected Data object.");
2113 }
2114 if (isLazy() || right.isLazy())
2115 {
2116 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2117 m_data=c->getPtr();
2118 return (*this);
2119 }
2120 else
2121 {
2122 binaryOp(right,divides<double>());
2123 return (*this);
2124 }
2125 }
2126
2127 Data&
2128 Data::operator/=(const boost::python::object& right)
2129 {
2130 if (isProtected()) {
2131 throw DataException("Error - attempt to update protected Data object.");
2132 }
2133 Data tmp(right,getFunctionSpace(),false);
2134 if (isLazy())
2135 {
2136 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2137 m_data=c->getPtr();
2138 return (*this);
2139 }
2140 else
2141 {
2142 binaryOp(tmp,divides<double>());
2143 return (*this);
2144 }
2145 }
2146
2147 Data
2148 Data::rpowO(const boost::python::object& left) const
2149 {
2150 Data left_d(left,*this);
2151 return left_d.powD(*this);
2152 }
2153
2154 Data
2155 Data::powO(const boost::python::object& right) const
2156 {
2157 Data tmp(right,getFunctionSpace(),false);
2158 return powD(tmp);
2159 }
2160
2161 Data
2162 Data::powD(const Data& right) const
2163 {
2164 if (isLazy() || right.isLazy())
2165 {
2166 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),POW);
2167 return Data(c);
2168 }
2169 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2170 }
2171
2172 //
2173 // NOTE: It is essential to specify the namespace this operator belongs to
2174 Data
2175 escript::operator+(const Data& left, const Data& right)
2176 {
2177 if (left.isLazy() || right.isLazy())
2178 {
2179 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),ADD);
2180 return Data(c);
2181 }
2182 return C_TensorBinaryOperation(left, right, plus<double>());
2183 }
2184
2185 //
2186 // NOTE: It is essential to specify the namespace this operator belongs to
2187 Data
2188 escript::operator-(const Data& left, const Data& right)
2189 {
2190 if (left.isLazy() || right.isLazy())
2191 {
2192 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),SUB);
2193 return Data(c);
2194 }
2195 return C_TensorBinaryOperation(left, right, minus<double>());
2196 }
2197
2198 //
2199 // NOTE: It is essential to specify the namespace this operator belongs to
2200 Data
2201 escript::operator*(const Data& left, const Data& right)
2202 {
2203 if (left.isLazy() || right.isLazy())
2204 {
2205 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),MUL);
2206 return Data(c);
2207 }
2208 return C_TensorBinaryOperation(left, right, multiplies<double>());
2209 }
2210
2211 //
2212 // NOTE: It is essential to specify the namespace this operator belongs to
2213 Data
2214 escript::operator/(const Data& left, const Data& right)
2215 {
2216 if (left.isLazy() || right.isLazy())
2217 {
2218 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),DIV);
2219 return Data(c);
2220 }
2221 return C_TensorBinaryOperation(left, right, divides<double>());
2222 }
2223
2224 //
2225 // NOTE: It is essential to specify the namespace this operator belongs to
2226 Data
2227 escript::operator+(const Data& left, const boost::python::object& right)
2228 {
2229 if (left.isLazy())
2230 {
2231 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),ADD);
2232 return Data(c);
2233 }
2234 return left+Data(right,left.getFunctionSpace(),false);
2235 }
2236
2237 //
2238 // NOTE: It is essential to specify the namespace this operator belongs to
2239 Data
2240 escript::operator-(const Data& left, const boost::python::object& right)
2241 {
2242 if (left.isLazy())
2243 {
2244 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),SUB);
2245 return Data(c);
2246 }
2247 return left-Data(right,left.getFunctionSpace(),false);
2248 }
2249
2250 //
2251 // NOTE: It is essential to specify the namespace this operator belongs to
2252 Data
2253 escript::operator*(const Data& left, const boost::python::object& right)
2254 {
2255 if (left.isLazy())
2256 {
2257 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),MUL);
2258 return Data(c);
2259 }
2260 return left*Data(right,left.getFunctionSpace(),false);
2261 }
2262
2263 //
2264 // NOTE: It is essential to specify the namespace this operator belongs to
2265 Data
2266 escript::operator/(const Data& left, const boost::python::object& right)
2267 {
2268 if (left.isLazy())
2269 {
2270 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),DIV);
2271 return Data(c);
2272 }
2273 return left/Data(right,left.getFunctionSpace(),false);
2274 }
2275
2276 //
2277 // NOTE: It is essential to specify the namespace this operator belongs to
2278 Data
2279 escript::operator+(const boost::python::object& left, const Data& right)
2280 {
2281 if (right.isLazy())
2282 {
2283 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),ADD);
2284 return Data(c);
2285 }
2286 return Data(left,right.getFunctionSpace(),false)+right;
2287 }
2288
2289 //
2290 // NOTE: It is essential to specify the namespace this operator belongs to
2291 Data
2292 escript::operator-(const boost::python::object& left, const Data& right)
2293 {
2294 if (right.isLazy())
2295 {
2296 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),SUB);
2297 return Data(c);
2298 }
2299 return Data(left,right.getFunctionSpace(),false)-right;
2300 }
2301
2302 //
2303 // NOTE: It is essential to specify the namespace this operator belongs to
2304 Data
2305 escript::operator*(const boost::python::object& left, const Data& right)
2306 {
2307 if (right.isLazy())
2308 {
2309 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),MUL);
2310 return Data(c);
2311 }
2312 return Data(left,right.getFunctionSpace(),false)*right;
2313 }
2314
2315 //
2316 // NOTE: It is essential to specify the namespace this operator belongs to
2317 Data
2318 escript::operator/(const boost::python::object& left, const Data& right)
2319 {
2320 if (right.isLazy())
2321 {
2322 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),DIV);
2323 return Data(c);
2324 }
2325 return Data(left,right.getFunctionSpace(),false)/right;
2326 }
2327
2328
2329 /* TODO */
2330 /* global reduction */
2331 Data
2332 Data::getItem(const boost::python::object& key) const
2333 {
2334
2335 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2336
2337 if (slice_region.size()!=getDataPointRank()) {
2338 throw DataException("Error - slice size does not match Data rank.");
2339 }
2340
2341 return getSlice(slice_region);
2342 }
2343
2344 /* TODO */
2345 /* global reduction */
2346 Data
2347 Data::getSlice(const DataTypes::RegionType& region) const
2348 {
2349 return Data(*this,region);
2350 }
2351
2352 /* TODO */
2353 /* global reduction */
2354 void
2355 Data::setItemO(const boost::python::object& key,
2356 const boost::python::object& value)
2357 {
2358 Data tempData(value,getFunctionSpace());
2359 setItemD(key,tempData);
2360 }
2361
2362 void
2363 Data::setItemD(const boost::python::object& key,
2364 const Data& value)
2365 {
2366 // const DataArrayView& view=getPointDataView();
2367
2368 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2369 if (slice_region.size()!=getDataPointRank()) {
2370 throw DataException("Error - slice size does not match Data rank.");
2371 }
2372 if (getFunctionSpace()!=value.getFunctionSpace()) {
2373 setSlice(Data(value,getFunctionSpace()),slice_region);
2374 } else {
2375 setSlice(value,slice_region);
2376 }
2377 }
2378
2379 void
2380 Data::setSlice(const Data& value,
2381 const DataTypes::RegionType& region)
2382 {
2383 if (isProtected()) {
2384 throw DataException("Error - attempt to update protected Data object.");
2385 }
2386 FORCERESOLVE;
2387 /* if (isLazy())
2388 {
2389 throw DataException("Error - setSlice not permitted on lazy data.");
2390 }*/
2391 Data tempValue(value);
2392 typeMatchLeft(tempValue);
2393 typeMatchRight(tempValue);
2394 getReady()->setSlice(tempValue.m_data.get(),region);
2395 }
2396
2397 void
2398 Data::typeMatchLeft(Data& right) const
2399 {
2400 if (right.isLazy() && !isLazy())
2401 {
2402 right.resolve();
2403 }
2404 if (isExpanded()){
2405 right.expand();
2406 } else if (isTagged()) {
2407 if (right.isConstant()) {
2408 right.tag();
2409 }
2410 }
2411 }
2412
2413 void
2414 Data::typeMatchRight(const Data& right)
2415 {
2416 if (isLazy() && !right.isLazy())
2417 {
2418 resolve();
2419 }
2420 if (isTagged()) {
2421 if (right.isExpanded()) {
2422 expand();
2423 }
2424 } else if (isConstant()) {
2425 if (right.isExpanded()) {
2426 expand();
2427 } else if (right.isTagged()) {
2428 tag();
2429 }
2430 }
2431 }
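 // Note: typeMatchLeft/typeMatchRight promote storage along the ordering
 // DataConstant -> DataTagged -> DataExpanded (resolving a lazy operand first),
 // so that, e.g., assigning an expanded value into a tagged target expands the
 // target before the slice is written.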
2432
2433 void
2434 Data::setTaggedValueByName(std::string name,
2435 const boost::python::object& value)
2436 {
2437 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2438 FORCERESOLVE;
2439 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2440 setTaggedValue(tagKey,value);
2441 }
2442 }
2443 void
2444 Data::setTaggedValue(int tagKey,
2445 const boost::python::object& value)
2446 {
2447 if (isProtected()) {
2448 throw DataException("Error - attempt to update protected Data object.");
2449 }
2450 //
2451 // Ensure underlying data object is of type DataTagged
2452 FORCERESOLVE;
2453 if (isConstant()) tag();
2454 numeric::array asNumArray(value);
2455
2456 // extract the shape of the numarray
2457 DataTypes::ShapeType tempShape;
2458 for (int i=0; i < asNumArray.getrank(); i++) {
2459 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2460 }
2461
2462 DataVector temp_data2;
2463 temp_data2.copyFromNumArray(asNumArray);
2464
2465 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2466 }
2467
2468
2469 void
2470 Data::setTaggedValueFromCPP(int tagKey,
2471 const DataTypes::ShapeType& pointshape,
2472 const DataTypes::ValueType& value,
2473 int dataOffset)
2474 {
2475 if (isProtected()) {
2476 throw DataException("Error - attempt to update protected Data object.");
2477 }
2478 //
2479 // Ensure underlying data object is of type DataTagged
2480 FORCERESOLVE;
2481 if (isConstant()) tag();
2482 //
2483 // Call DataAbstract::setTaggedValue
2484 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2485 }
2486
2487 int
2488 Data::getTagNumber(int dpno)
2489 {
2490 if (isEmpty())
2491 {
2492 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2493 }
2494 return getFunctionSpace().getTagFromDataPointNo(dpno);
2495 }
2496
2497
2498 ostream& escript::operator<<(ostream& o, const Data& data)
2499 {
2500 o << data.toString();
2501 return o;
2502 }
2503
2504 Data
2505 escript::C_GeneralTensorProduct(Data& arg_0,
2506 Data& arg_1,
2507 int axis_offset,
2508 int transpose)
2509 {
2510 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2511 // SM is the product of the last axis_offset entries in arg_0.getShape().
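 // Worked example (illustrative): for arg_0 of shape (2,3) and arg_1 of shape (3,4)
 // with axis_offset=1 and transpose=0, the inner size SM=3 is checked against both
 // inputs, SL=2 and SR=4, and the result has shape (2,4), i.e. an ordinary
 // matrix-matrix product.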
2512
2513 // deal with any lazy data
2514 // if (arg_0.isLazy()) {arg_0.resolve();}
2515 // if (arg_1.isLazy()) {arg_1.resolve();}
2516 if (arg_0.isLazy() || arg_1.isLazy())
2517 {
2518 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2519 return Data(c);
2520 }
2521
2522 // Interpolate if necessary and find an appropriate function space
2523 Data arg_0_Z, arg_1_Z;
2524 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2525 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2526 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2527 arg_1_Z = Data(arg_1);
2528 }
2529 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2530 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2531 arg_0_Z =Data(arg_0);
2532 }
2533 else {
2534 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2535 }
2536 } else {
2537 arg_0_Z = Data(arg_0);
2538 arg_1_Z = Data(arg_1);
2539 }
2540 // Get rank and shape of inputs
2541 int rank0 = arg_0_Z.getDataPointRank();
2542 int rank1 = arg_1_Z.getDataPointRank();
2543 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2544 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2545
2546 // Prepare for the loops of the product and verify compatibility of shapes
2547 int start0=0, start1=0;
2548 if (transpose == 0) {}
2549 else if (transpose == 1) { start0 = axis_offset; }
2550 else if (transpose == 2) { start1 = rank1-axis_offset; }
2551 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2552
2553
2554 // Adjust the shapes for transpose
2555 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2556 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2557 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2558 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
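   // With transpose==1 the contraction runs over the leading axis_offset axes of
   // arg_0 (its axes are rotated so that those come last); with transpose==2 it
   // runs over the trailing axis_offset axes of arg_1.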
2559
2560 #if 0
2561 // For debugging: show shape after transpose
2562 char tmp[100];
2563 std::string shapeStr;
2564 shapeStr = "(";
2565 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2566 shapeStr += ")";
2567 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2568 shapeStr = "(";
2569 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2570 shapeStr += ")";
2571 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2572 #endif
2573
2574 // Prepare for the loops of the product
2575 int SL=1, SM=1, SR=1;
2576 for (int i=0; i<rank0-axis_offset; i++) {
2577 SL *= tmpShape0[i];
2578 }
2579 for (int i=rank0-axis_offset; i<rank0; i++) {
2580 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2581 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2582 }
2583 SM *= tmpShape0[i];
2584 }
2585 for (int i=axis_offset; i<rank1; i++) {
2586 SR *= tmpShape1[i];
2587 }
2588
2589 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2590 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2591 { // block to limit the scope of out_index
2592 int out_index=0;
2593 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2594 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2595 }
2596
2597 // Declare output Data object
2598 Data res;
2599
2600 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2601 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2602 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2603 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2604 double *ptr_2 = &(res.getDataAtOffset(0));
2605 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2606 }
2607 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2608
2609 // Prepare the DataConstant input
2610 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2611 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2612
2613 // Borrow DataTagged input from Data object
2614 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2615 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2616
 2617   // Prepare a DataTagged output (tmp_2)
2618 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2619 res.tag();
2620 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2621 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2622
2623 // Prepare offset into DataConstant
2624 int offset_0 = tmp_0->getPointOffset(0,0);
2625 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2626 // Get the views
2627 // DataArrayView view_1 = tmp_1->getDefaultValue();
2628 // DataArrayView view_2 = tmp_2->getDefaultValue();
2629 // // Get the pointers to the actual data
2630 // double *ptr_1 = &((view_1.getData())[0]);
2631 // double *ptr_2 = &((view_2.getData())[0]);
2632
2633 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2634 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2635
2636
2637 // Compute an MVP for the default
2638 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2639 // Compute an MVP for each tag
2640 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2641 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2642 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2643 tmp_2->addTag(i->first);
2644 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2645 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2646 // double *ptr_1 = &view_1.getData(0);
2647 // double *ptr_2 = &view_2.getData(0);
2648
2649 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2650 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2651
2652 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2653 }
2654
2655 }
2656 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2657
2658 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2659 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2660 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2661 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2662 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2663 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2664 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2665 int sampleNo_1,dataPointNo_1;
2666 int numSamples_1 = arg_1_Z.getNumSamples();
2667 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2668 int offset_0 = tmp_0->getPointOffset(0,0);
2669 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2670 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2671 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2672 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2673 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2674 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2675 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2676 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2677 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2678 }
2679 }
2680
2681 }
2682 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2683
2684 // Borrow DataTagged input from Data object
2685 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2686 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2687
2688 // Prepare the DataConstant input
2689 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2690 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2691
 2692   // Prepare a DataTagged output (tmp_2)
2693 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2694 res.tag();
2695 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2696 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2697
2698 // Prepare offset into DataConstant
2699 int offset_1 = tmp_1->getPointOffset(0,0);
2700 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2701 // Get the views
2702 // DataArrayView view_0 = tmp_0->getDefaultValue();
2703 // DataArrayView view_2 = tmp_2->getDefaultValue();
2704 // // Get the pointers to the actual data
2705 // double *ptr_0 = &((view_0.getData())[0]);
2706 // double *ptr_2 = &((view_2.getData())[0]);
2707
2708 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2709 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2710
2711 // Compute an MVP for the default
2712 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2713 // Compute an MVP for each tag
2714 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2715 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2716 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2717 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2718 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2719 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2720 // double *ptr_0 = &view_0.getData(0);
2721 // double *ptr_2 = &view_2.getData(0);
2722
2723 tmp_2->addTag(i->first);
2724 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2725 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2726 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2727 }
2728
2729 }
2730 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2731
2732 // Borrow DataTagged input from Data object
2733 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2734 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2735
2736 // Borrow DataTagged input from Data object
2737 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2738 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2739
 2740   // Prepare a DataTagged output (tmp_2)
2741 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2742 res.tag(); // DataTagged output
2743 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2744 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2745
2746 // // Get the views
2747 // DataArrayView view_0 = tmp_0->getDefaultValue();
2748 // DataArrayView view_1 = tmp_1->getDefaultValue();
2749 // DataArrayView view_2 = tmp_2->getDefaultValue();
2750 // // Get the pointers to the actual data
2751 // double *ptr_0 = &((view_0.getData())[0]);
2752 // double *ptr_1 = &((view_1.getData())[0]);
2753 // double *ptr_2 = &((view_2.getData())[0]);
2754
2755 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2756 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2757 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2758
2759
2760 // Compute an MVP for the default
2761 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2762 // Merge the tags
2763 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2764 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2765 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2766 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2767 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2768 }
2769 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2770 tmp_2->addTag(i->first);
2771 }
2772 // Compute an MVP for each tag
2773 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2774 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2775 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2776 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2777 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2778 // double *ptr_0 = &view_0.getData(0);
2779 // double *ptr_1 = &view_1.getData(0);
2780 // double *ptr_2 = &view_2.getData(0);
2781
2782 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2783 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2784 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2785
2786 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2787 }
2788
2789 }
2790 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2791
 2792   // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2793 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2794 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2795 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2796 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2797 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2798 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2799 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2800 int sampleNo_0,dataPointNo_0;
2801 int numSamples_0 = arg_0_Z.getNumSamples();
2802 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2803 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2804 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2805 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2806 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2807 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2808 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2809 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2810 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2811 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2812 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2813 }
2814 }
2815
2816 }
2817 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2818
2819 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2820 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2821 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2822 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2823 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2824 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2825 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2826 int sampleNo_0,dataPointNo_0;
2827 int numSamples_0 = arg_0_Z.getNumSamples();
2828 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2829 int offset_1 = tmp_1->getPointOffset(0,0);
2830 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2831 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2832 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2833 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2834 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2835 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2836 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2837 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2838 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2839 }
2840 }
2841
2842
2843 }
2844 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2845
 2846   // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2847 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2848 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2849 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2850 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2851 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2852 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2853 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2854 int sampleNo_0,dataPointNo_0;
2855 int numSamples_0 = arg_0_Z.getNumSamples();
2856 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2857 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2858 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2859 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2860 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2861 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2862 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2863 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2864 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2865 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2866 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2867 }
2868 }
2869
2870 }
2871 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2872
 2873   // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2874 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2875 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2876 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2877 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2878 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2879 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2880 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2881 int sampleNo_0,dataPointNo_0;
2882 int numSamples_0 = arg_0_Z.getNumSamples();
2883 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2884 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2885 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2886 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2887 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2888 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2889 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2890 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2891 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2892 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2893 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2894 }
2895 }
2896
2897 }
2898 else {
2899 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2900 }
2901
2902 return res;
2903 }
2904
2905 DataAbstract*
2906 Data::borrowData() const
2907 {
2908 return m_data.get();
2909 }
2910
2911 // Not all that happy about returning a non-const from a const
2912 DataAbstract_ptr
2913 Data::borrowDataPtr() const
2914 {
2915 return m_data;
2916 }
2917
2918 // Not all that happy about returning a non-const from a const
2919 DataReady_ptr
2920 Data::borrowReadyPtr() const
2921 {
2922 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2923 EsysAssert((dr!=0), "Error - casting to DataReady.");
2924 return dr;
2925 }
2926
2927 std::string
2928 Data::toString() const
2929 {
2930 if (!m_data->isEmpty() &&
2931 !m_data->isLazy() &&
2932 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2933 {
2934 stringstream temp;
2935 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2936 return temp.str();
2937 }
2938 return m_data->toString();
2939 }
2940
2941
2942
2943 DataTypes::ValueType::const_reference
2944 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2945 {
2946 if (isLazy())
2947 {
2948 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2949 }
2950 return getReady()->getDataAtOffset(i);
2951 }
2952
2953
2954 DataTypes::ValueType::reference
2955 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2956 {
2957 // if (isLazy())
2958 // {
2959 // throw DataException("getDataAtOffset not permitted on lazy data.");
2960 // }
2961 FORCERESOLVE;
2962 return getReady()->getDataAtOffset(i);
2963 }
2964
2965 DataTypes::ValueType::const_reference
2966 Data::getDataPoint(int sampleNo, int dataPointNo) const
2967 {
2968 if (!isReady())
2969 {
2970 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2971 }
2972 else
2973 {
2974 const DataReady* dr=getReady();
2975 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2976 }
2977 }
2978
2979
2980 DataTypes::ValueType::reference
2981 Data::getDataPoint(int sampleNo, int dataPointNo)
2982 {
2983 FORCERESOLVE;
2984 if (!isReady())
2985 {
2986 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2987 }
2988 else
2989 {
2990 DataReady* dr=getReady();
2991 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2992 }
2993 }
2994
2995
2996 /* Member functions specific to the MPI implementation */
2997
2998 void
2999 Data::print()
3000 {
3001 int i,j;
3002
3003 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
3004 for( i=0; i<getNumSamples(); i++ )
3005 {
3006 printf( "[%6d]", i );
3007 for( j=0; j<getNumDataPointsPerSample(); j++ )
3008 printf( "\t%10.7g", (getSampleData(i))[j] );
3009 printf( "\n" );
3010 }
3011 }
3012 void
3013 Data::dump(const std::string fileName) const
3014 {
3015 try
3016 {
3017 if (isLazy())
3018 {
3019 Data temp(*this); // this is to get a non-const object which we can resolve
3020 temp.resolve();
3021 temp.dump(fileName);
3022 }
3023 else
3024 {
 3025        m_data->dump(fileName);
3026 }
3027 }
3028 catch (exception& e)
3029 {
3030 cout << e.what() << endl;
3031 }
3032 }
3033
3034 int
3035 Data::get_MPISize() const
3036 {
3037 int size;
3038 #ifdef PASO_MPI
3039 int error;
3040 error = MPI_Comm_size( get_MPIComm(), &size );
3041 #else
3042 size = 1;
3043 #endif
3044 return size;
3045 }
3046
3047 int
3048 Data::get_MPIRank() const
3049 {
3050 int rank;
3051 #ifdef PASO_MPI
3052 int error;
3053 error = MPI_Comm_rank( get_MPIComm(), &rank );
3054 #else
3055 rank = 0;
3056 #endif
3057 return rank;
3058 }
3059
3060 MPI_Comm
3061 Data::get_MPIComm() const
3062 {
3063 #ifdef PASO_MPI
3064 return MPI_COMM_WORLD;
3065 #else
3066 return -1;
3067 #endif
3068 }
3069
3070
