
Contents of /trunk/escript/src/Data.cpp



Revision 2066 - Thu Nov 20 05:31:33 2008 UTC by jfenwick
File size: 86254 bytes
Fixed Data::toString to look at the amount of data actually stored rather than the number of points in the domain.

Added support for GeneralTensorProduct to LazyData
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "escript/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36
37 #include <boost/python/dict.hpp>
38 #include <boost/python/extract.hpp>
39 #include <boost/python/long.hpp>
40
41 using namespace std;
42 using namespace boost::python;
43 using namespace boost;
44 using namespace escript;
45
46 // ensure the current object is not a DataLazy
47 // The idea was that we could add an optional warning whenever a resolve is forced
48 #define FORCERESOLVE if (isLazy()) {resolve();}
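// A hypothetical sketch of that optional warning (an assumption for illustration only,
// not part of this revision; the g_warnOnForcedResolve flag is made up):
//
//   #define FORCERESOLVE if (isLazy()) { \
//       if (g_warnOnForcedResolve) { \
//           std::cerr << "Warning: forcing resolve of lazy Data." << std::endl; \
//       } \
//       resolve(); \
//   }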
49
50 Data::Data()
51 {
52 //
53 // Default data is type DataEmpty
54 DataAbstract* temp=new DataEmpty();
55 m_data=temp->getPtr();
56 m_protected=false;
57 }
58
59 Data::Data(double value,
60 const tuple& shape,
61 const FunctionSpace& what,
62 bool expanded)
63 {
64 DataTypes::ShapeType dataPointShape;
65 for (int i = 0; i < shape.attr("__len__")(); ++i) {
66 dataPointShape.push_back(extract<const int>(shape[i]));
67 }
68
69 int len = DataTypes::noValues(dataPointShape);
70 DataVector temp_data(len,value,len);
71 initialise(temp_data, dataPointShape, what, expanded);
72 m_protected=false;
73 }
74
75 Data::Data(double value,
76 const DataTypes::ShapeType& dataPointShape,
77 const FunctionSpace& what,
78 bool expanded)
79 {
80 int len = DataTypes::noValues(dataPointShape);
81
82 DataVector temp_data(len,value,len);
83 // DataArrayView temp_dataView(temp_data, dataPointShape);
84
85 // initialise(temp_dataView, what, expanded);
86 initialise(temp_data, dataPointShape, what, expanded);
87
88 m_protected=false;
89 }
90
91 Data::Data(const Data& inData)
92 {
93 m_data=inData.m_data;
94 m_protected=inData.isProtected();
95 }
96
97
98 Data::Data(const Data& inData,
99 const DataTypes::RegionType& region)
100 {
101 DataAbstract_ptr dat=inData.m_data;
102 if (inData.isLazy())
103 {
104 dat=inData.m_data->resolve();
105 }
106 else
107 {
108 dat=inData.m_data;
109 }
110 //
111 // Create Data which is a slice of another Data
112 DataAbstract* tmp = dat->getSlice(region);
113 m_data=DataAbstract_ptr(tmp);
114 m_protected=false;
115 }
116
117 Data::Data(const Data& inData,
118 const FunctionSpace& functionspace)
119 {
120 if (inData.isEmpty())
121 {
122 throw DataException("Error - will not interpolate for instances of DataEmpty.");
123 }
124 if (inData.getFunctionSpace()==functionspace) {
125 m_data=inData.m_data;
126 }
127 else
128 {
129
130 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
131 if (!inData.probeInterpolation(functionspace))
132 { // Even though this is constant, we still need to check whether interpolation is allowed
133 throw FunctionSpaceException("Call to probeInterpolation returned false for DataConstant.");
134 }
135 // if the data is not lazy, this will just be a cast to DataReady
136 DataReady_ptr dr=inData.m_data->resolve();
137 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
138 m_data=DataAbstract_ptr(dc);
139 } else {
140 Data tmp(0,inData.getDataPointShape(),functionspace,true);
141 // Note: Must use a reference or pointer to a derived object
142 // in order to get polymorphic behaviour. Shouldn't really
143 // be able to create an instance of AbstractDomain but that was done
144 // as a boost::python workaround which may no longer be required.
145 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
146 const_Domain_ptr inDataDomain=inData.getDomain();
147 if (inDataDomain==functionspace.getDomain()) {
148 inDataDomain->interpolateOnDomain(tmp,inData);
149 } else {
150 inDataDomain->interpolateACross(tmp,inData);
151 }
152 m_data=tmp.m_data;
153 }
154 }
155 m_protected=false;
156 }
157
158 Data::Data(DataAbstract* underlyingdata)
159 {
160 // m_data=shared_ptr<DataAbstract>(underlyingdata);
161 m_data=underlyingdata->getPtr();
162 m_protected=false;
163 }
164
165 Data::Data(DataAbstract_ptr underlyingdata)
166 {
167 m_data=underlyingdata;
168 m_protected=false;
169 }
170
171
172 Data::Data(const numeric::array& value,
173 const FunctionSpace& what,
174 bool expanded)
175 {
176 initialise(value,what,expanded);
177 m_protected=false;
178 }
179 /*
180 Data::Data(const DataArrayView& value,
181 const FunctionSpace& what,
182 bool expanded)
183 {
184 initialise(value,what,expanded);
185 m_protected=false;
186 }*/
187
188 Data::Data(const DataTypes::ValueType& value,
189 const DataTypes::ShapeType& shape,
190 const FunctionSpace& what,
191 bool expanded)
192 {
193 initialise(value,shape,what,expanded);
194 m_protected=false;
195 }
196
197
198 Data::Data(const object& value,
199 const FunctionSpace& what,
200 bool expanded)
201 {
202 numeric::array asNumArray(value);
203 initialise(asNumArray,what,expanded);
204 m_protected=false;
205 }
206
207
208 Data::Data(const object& value,
209 const Data& other)
210 {
211 numeric::array asNumArray(value);
212
213 // extract the shape of the numarray
214 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
215 // /* for (int i=0; i < asNumArray.getrank(); i++) {
216 // tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
217 // }*/
218 // // get the space for the data vector
219 // int len = DataTypes::noValues(tempShape);
220 // DataVector temp_data(len, 0.0, len);
221 // /* DataArrayView temp_dataView(temp_data, tempShape);
222 // temp_dataView.copy(asNumArray);*/
223 // temp_data.copyFromNumArray(asNumArray);
224
225 //
226 // Create DataConstant using the given value and all other parameters
227 // copied from other. If value is a rank 0 object this Data
228 // will assume the point data shape of other.
229
230 if (DataTypes::getRank(tempShape)/*temp_dataView.getRank()*/==0) {
231
232
233 // get the space for the data vector
234 int len1 = DataTypes::noValues(tempShape);
235 DataVector temp_data(len1, 0.0, len1);
236 temp_data.copyFromNumArray(asNumArray);
237
238 int len = DataTypes::noValues(other.getDataPointShape());
239
240 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
241 //DataArrayView temp2_dataView(temp2_data, other.getPointDataView().getShape());
242 // initialise(temp2_dataView, other.getFunctionSpace(), false);
243
244 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
245 // boost::shared_ptr<DataAbstract> sp(t);
246 // m_data=sp;
247 m_data=DataAbstract_ptr(t);
248
249 } else {
250 //
251 // Create a DataConstant with the same sample shape as other
252 // initialise(temp_dataView, other.getFunctionSpace(), false);
253 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
254 // boost::shared_ptr<DataAbstract> sp(t);
255 // m_data=sp;
256 m_data=DataAbstract_ptr(t);
257 }
258 m_protected=false;
259 }
260
261 Data::~Data()
262 {
263
264 }
265
266
267
268 void
269 Data::initialise(const boost::python::numeric::array& value,
270 const FunctionSpace& what,
271 bool expanded)
272 {
273 //
274 // Construct a Data object of the appropriate type.
275 // Construct the object first as there seems to be a bug which causes
276 // undefined behaviour if an exception is thrown during construction
277 // within the shared_ptr constructor.
278 if (expanded) {
279 DataAbstract* temp=new DataExpanded(value, what);
280 // boost::shared_ptr<DataAbstract> temp_data(temp);
281 // m_data=temp_data;
282 m_data=temp->getPtr();
283 } else {
284 DataAbstract* temp=new DataConstant(value, what);
285 // boost::shared_ptr<DataAbstract> temp_data(temp);
286 // m_data=temp_data;
287 m_data=temp->getPtr();
288 }
289 }
290
291
292 void
293 Data::initialise(const DataTypes::ValueType& value,
294 const DataTypes::ShapeType& shape,
295 const FunctionSpace& what,
296 bool expanded)
297 {
298 //
299 // Construct a Data object of the appropriate type.
300 // Construct the object first as there seems to be a bug which causes
301 // undefined behaviour if an exception is thrown during construction
302 // within the shared_ptr constructor.
303 if (expanded) {
304 DataAbstract* temp=new DataExpanded(what, shape, value);
305 // boost::shared_ptr<DataAbstract> temp_data(temp);
306 // m_data=temp_data;
307 m_data=temp->getPtr();
308 } else {
309 DataAbstract* temp=new DataConstant(what, shape, value);
310 // boost::shared_ptr<DataAbstract> temp_data(temp);
311 // m_data=temp_data;
312 m_data=temp->getPtr();
313 }
314 }
315
316
317 // void
318 // Data::CompareDebug(const Data& rd)
319 // {
320 // using namespace std;
321 // bool mismatch=false;
322 // std::cout << "Comparing left and right" << endl;
323 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
324 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
325 //
326 // if (left==0)
327 // {
328 // cout << "left arg is not a DataTagged\n";
329 // return;
330 // }
331 //
332 // if (right==0)
333 // {
334 // cout << "right arg is not a DataTagged\n";
335 // return;
336 // }
337 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
338 // cout << "Shapes ";
339 // if (left->getShape()==right->getShape())
340 // {
341 // cout << "ok\n";
342 // }
343 // else
344 // {
345 // cout << "Problem: shapes do not match\n";
346 // mismatch=true;
347 // }
348 // int lim=left->getVector().size();
349 // if (right->getVector().size()) lim=right->getVector().size();
350 // for (int i=0;i<lim;++i)
351 // {
352 // if (left->getVector()[i]!=right->getVector()[i])
353 // {
354 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
355 // mismatch=true;
356 // }
357 // }
358 //
359 // // still need to check the tag map
360 // // also need to watch what is happening to function spaces, are they copied or what?
361 //
362 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
363 // const DataTagged::DataMapType& mapright=right->getTagLookup();
364 //
365 // if (mapleft.size()!=mapright.size())
366 // {
367 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
368 // mismatch=true;
369 // cout << "Left map\n";
370 // DataTagged::DataMapType::const_iterator i,j;
371 // for (i=mapleft.begin();i!=mapleft.end();++i) {
372 // cout << "(" << i->first << "=>" << i->second << ")\n";
373 // }
374 // cout << "Right map\n";
375 // for (i=mapright.begin();i!=mapright.end();++i) {
376 // cout << "(" << i->first << "=>" << i->second << ")\n";
377 // }
378 // cout << "End map\n";
379 //
380 // }
381 //
382 // DataTagged::DataMapType::const_iterator i,j;
383 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
384 // if ((i->first!=j->first) || (i->second!=j->second))
385 // {
386 // cout << "(" << i->first << "=>" << i->second << ")";
387 // cout << ":(" << j->first << "=>" << j->second << ") ";
388 // mismatch=true;
389 // }
390 // }
391 // if (mismatch)
392 // {
393 // cout << "#Mismatch\n";
394 // }
395 // }
396
397 escriptDataC
398 Data::getDataC()
399 {
400 escriptDataC temp;
401 temp.m_dataPtr=(void*)this;
402 return temp;
403 }
404
405 escriptDataC
406 Data::getDataC() const
407 {
408 escriptDataC temp;
409 temp.m_dataPtr=(void*)this;
410 return temp;
411 }
412
413 const boost::python::tuple
414 Data::getShapeTuple() const
415 {
416 const DataTypes::ShapeType& shape=getDataPointShape();
417 switch(getDataPointRank()) {
418 case 0:
419 return make_tuple();
420 case 1:
421 return make_tuple(long_(shape[0]));
422 case 2:
423 return make_tuple(long_(shape[0]),long_(shape[1]));
424 case 3:
425 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
426 case 4:
427 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
428 default:
429 throw DataException("Error - illegal Data rank.");
430 }
431 }
432
433
434 // The different name is needed because boost has trouble with overloaded functions.
435 // It can't work out what type the function is based solely on its name.
436 // There are ways to fix this involving creating function pointer variables for each form
437 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
438 Data*
439 Data::copySelf()
440 {
441 DataAbstract* temp=m_data->deepCopy();
442 return new Data(temp);
443 }
444
445 void
446 Data::copy(const Data& other)
447 {
448 DataAbstract* temp=other.m_data->deepCopy();
449 DataAbstract_ptr p=temp->getPtr();
450 m_data=p;
451 }
452
453
454 Data
455 Data::delay()
456 {
457 DataLazy* dl=new DataLazy(m_data);
458 return Data(dl);
459 }
460
461 void
462 Data::delaySelf()
463 {
464 if (!isLazy())
465 {
466 m_data=(new DataLazy(m_data))->getPtr();
467 }
468 }
469
470 void
471 Data::setToZero()
472 {
473 if (isEmpty())
474 {
475 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
476 }
477 m_data->setToZero();
478 }
479
480 void
481 Data::copyWithMask(const Data& other,
482 const Data& mask)
483 {
484 // 1. Interpolate if required so all Datas use the same FS as this
486 // 2. Tag or Expand so that all Datas are the same type
486 // 3. Iterate over the data vectors copying values where mask is >0
487 if (other.isEmpty() || mask.isEmpty())
488 {
489 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
490 }
491 Data other2(other);
492 Data mask2(mask);
493 other2.resolve();
494 mask2.resolve();
495 this->resolve();
496 FunctionSpace myFS=getFunctionSpace();
497 FunctionSpace oFS=other2.getFunctionSpace();
498 FunctionSpace mFS=mask2.getFunctionSpace();
499 if (oFS!=myFS)
500 {
501 if (other2.probeInterpolation(myFS))
502 {
503 other2=other2.interpolate(myFS);
504 }
505 else
506 {
507 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
508 }
509 }
510 if (mFS!=myFS)
511 {
512 if (mask2.probeInterpolation(myFS))
513 {
514 mask2=mask2.interpolate(myFS);
515 }
516 else
517 {
518 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
519 }
520 }
521 // Ensure that all args have the same type
522 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
523 {
524 this->expand();
525 other2.expand();
526 mask2.expand();
527 }
528 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
529 {
530 this->tag();
531 other2.tag();
532 mask2.tag();
533 }
534 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
535 {
536 }
537 else
538 {
539 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
540 }
541 // Now we iterate over the elements
542 DataVector& self=getReadyPtr()->getVector();
543 const DataVector& ovec=other2.getReadyPtr()->getVector();
544 const DataVector& mvec=mask2.getReadyPtr()->getVector();
545 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
546 {
547 throw DataException("Error - size mismatch in arguments to copyWithMask.");
548 }
549 size_t num_points=self.size();
550
551 // OPENMP 3.0 allows unsigned loop vars.
552 #if defined(_OPENMP) && (_OPENMP < 200805)
553 long i;
554 #else
555 size_t i;
556 #endif
557 #pragma omp parallel for private(i) schedule(static)
558 for (i=0;i<num_points;++i)
559 {
560 if (mvec[i]>0)
561 {
562 self[i]=ovec[i];
563 }
564 }
565 }
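// Illustrative usage sketch (not part of this revision; fs and shape stand for any
// valid FunctionSpace and data point shape): copyWithMask overwrites values of this
// Data with values from 'other' wherever 'mask' is positive.
//
//   Data d(1.0, shape, fs, true);   // target, expanded and filled with 1.0
//   Data o(2.0, shape, fs, true);   // source values
//   Data m = o.wherePositive();     // mask is 1.0 everywhere here
//   d.copyWithMask(o, m);           // d now holds 2.0 at every data point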
566
567
568
569 bool
570 Data::isExpanded() const
571 {
572 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
573 return (temp!=0);
574 }
575
576 bool
577 Data::isTagged() const
578 {
579 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
580 return (temp!=0);
581 }
582
583 bool
584 Data::isEmpty() const
585 {
586 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
587 return (temp!=0);
588 }
589
590 bool
591 Data::isConstant() const
592 {
593 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
594 return (temp!=0);
595 }
596
597 bool
598 Data::isLazy() const
599 {
600 return m_data->isLazy();
601 }
602
603 // at the moment this is synonymous with !isLazy() but that could change
604 bool
605 Data::isReady() const
606 {
607 return (dynamic_cast<DataReady*>(m_data.get())!=0);
608 }
609
610
611 void
612 Data::setProtection()
613 {
614 m_protected=true;
615 }
616
617 bool
618 Data::isProtected() const
619 {
620 return m_protected;
621 }
622
623
624
625 void
626 Data::expand()
627 {
628 if (isConstant()) {
629 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
630 DataAbstract* temp=new DataExpanded(*tempDataConst);
631 // shared_ptr<DataAbstract> temp_data(temp);
632 // m_data=temp_data;
633 m_data=temp->getPtr();
634 } else if (isTagged()) {
635 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
636 DataAbstract* temp=new DataExpanded(*tempDataTag);
637 // shared_ptr<DataAbstract> temp_data(temp);
638 // m_data=temp_data;
639 m_data=temp->getPtr();
640 } else if (isExpanded()) {
641 //
642 // do nothing
643 } else if (isEmpty()) {
644 throw DataException("Error - Expansion of DataEmpty not possible.");
645 } else if (isLazy()) {
646 resolve();
647 expand(); // resolve might not give us expanded data
648 } else {
649 throw DataException("Error - Expansion not implemented for this Data type.");
650 }
651 }
652
653 void
654 Data::tag()
655 {
656 if (isConstant()) {
657 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
658 DataAbstract* temp=new DataTagged(*tempDataConst);
659 // shared_ptr<DataAbstract> temp_data(temp);
660 // m_data=temp_data;
661 m_data=temp->getPtr();
662 } else if (isTagged()) {
663 // do nothing
664 } else if (isExpanded()) {
665 throw DataException("Error - Creating tag data from DataExpanded not possible.");
666 } else if (isEmpty()) {
667 throw DataException("Error - Creating tag data from DataEmpty not possible.");
668 } else if (isLazy()) {
669 DataAbstract_ptr res=m_data->resolve();
670 if (m_data->isExpanded())
671 {
672 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
673 }
674 m_data=res;
675 tag();
676 } else {
677 throw DataException("Error - Tagging not implemented for this Data type.");
678 }
679 }
680
681 void
682 Data::resolve()
683 {
684 if (isLazy())
685 {
686 m_data=m_data->resolve();
687 }
688 }
689
690
691 Data
692 Data::oneOver() const
693 {
694 if (isLazy())
695 {
696 DataLazy* c=new DataLazy(borrowDataPtr(),RECIP);
697 return Data(c);
698 }
699 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
700 }
701
702 Data
703 Data::wherePositive() const
704 {
705 if (isLazy())
706 {
707 DataLazy* c=new DataLazy(borrowDataPtr(),GZ);
708 return Data(c);
709 }
710 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
711 }
712
713 Data
714 Data::whereNegative() const
715 {
716 if (isLazy())
717 {
718 DataLazy* c=new DataLazy(borrowDataPtr(),LZ);
719 return Data(c);
720 }
721 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
722 }
723
724 Data
725 Data::whereNonNegative() const
726 {
727 if (isLazy())
728 {
729 DataLazy* c=new DataLazy(borrowDataPtr(),GEZ);
730 return Data(c);
731 }
732 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
733 }
734
735 Data
736 Data::whereNonPositive() const
737 {
738 if (isLazy())
739 {
740 DataLazy* c=new DataLazy(borrowDataPtr(),LEZ);
741 return Data(c);
742 }
743 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
744 }
745
746 Data
747 Data::whereZero(double tol) const
748 {
749 Data dataAbs=abs();
750 return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
751 }
752
753 Data
754 Data::whereNonZero(double tol) const
755 {
756 Data dataAbs=abs();
757 return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
758 }
759
760 Data
761 Data::interpolate(const FunctionSpace& functionspace) const
762 {
763 return Data(*this,functionspace);
764 }
765
766 bool
767 Data::probeInterpolation(const FunctionSpace& functionspace) const
768 {
769 return getFunctionSpace().probeInterpolation(functionspace);
770 // if (getFunctionSpace()==functionspace) {
771 // return true;
772 // } else {
773 // const_Domain_ptr domain=getDomain();
774 // if (*domain==*functionspace.getDomain()) {
775 // return domain->probeInterpolationOnDomain(getFunctionSpace().getTypeCode(),functionspace.getTypeCode());
776 // } else {
777 // return domain->probeInterpolationACross(getFunctionSpace().getTypeCode(),*(functionspace.getDomain()),functionspace.getTypeCode());
778 // }
779 // }
780 }
781
782 Data
783 Data::gradOn(const FunctionSpace& functionspace) const
784 {
785 if (isEmpty())
786 {
787 throw DataException("Error - operation not permitted on instances of DataEmpty.");
788 }
789 double blocktimer_start = blocktimer_time();
790 if (functionspace.getDomain()!=getDomain())
791 throw DataException("Error - gradient cannot be calculated on different domains.");
792 DataTypes::ShapeType grad_shape=getDataPointShape();
793 grad_shape.push_back(functionspace.getDim());
794 Data out(0.0,grad_shape,functionspace,true);
795 getDomain()->setToGradient(out,*this);
796 blocktimer_increment("grad()", blocktimer_start);
797 return out;
798 }
799
800 Data
801 Data::grad() const
802 {
803 if (isEmpty())
804 {
805 throw DataException("Error - operation not permitted on instances of DataEmpty.");
806 }
807 return gradOn(escript::function(*getDomain()));
808 }
809
810 int
811 Data::getDataPointSize() const
812 {
813 return m_data->getNoValues();
814 }
815
816 DataTypes::ValueType::size_type
817 Data::getLength() const
818 {
819 return m_data->getLength();
820 }
821
822 const
823 boost::python::numeric::array
824 Data:: getValueOfDataPoint(int dataPointNo)
825 {
826 int i, j, k, l;
827
828 FORCERESOLVE;
829
830 //
831 // determine the rank and shape of each data point
832 int dataPointRank = getDataPointRank();
833 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
834
835 //
836 // create the numeric array to be returned
837 boost::python::numeric::array numArray(0.0);
838
839 //
840 // the shape of the returned numeric array will be the same
841 // as that of the data point
842 int arrayRank = dataPointRank;
843 const DataTypes::ShapeType& arrayShape = dataPointShape;
844
845 //
846 // resize the numeric array to the shape just calculated
847 if (arrayRank==0) {
848 numArray.resize(1);
849 }
850 if (arrayRank==1) {
851 numArray.resize(arrayShape[0]);
852 }
853 if (arrayRank==2) {
854 numArray.resize(arrayShape[0],arrayShape[1]);
855 }
856 if (arrayRank==3) {
857 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
858 }
859 if (arrayRank==4) {
860 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
861 }
862
863 if (getNumDataPointsPerSample()>0) {
864 int sampleNo = dataPointNo/getNumDataPointsPerSample();
865 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
866 //
867 // Check a valid sample number has been supplied
868 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
869 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
870 }
871
872 //
873 // Check a valid data point number has been supplied
874 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
875 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
876 }
877 // TODO: global error handling
878 // create a view of the data if it is stored locally
879 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
880 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
881
882
883 switch( dataPointRank ){
884 case 0 :
885 numArray[0] = getDataAtOffset(offset);
886 break;
887 case 1 :
888 for( i=0; i<dataPointShape[0]; i++ )
889 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
890 break;
891 case 2 :
892 for( i=0; i<dataPointShape[0]; i++ )
893 for( j=0; j<dataPointShape[1]; j++)
894 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
895 break;
896 case 3 :
897 for( i=0; i<dataPointShape[0]; i++ )
898 for( j=0; j<dataPointShape[1]; j++ )
899 for( k=0; k<dataPointShape[2]; k++)
900 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
901 break;
902 case 4 :
903 for( i=0; i<dataPointShape[0]; i++ )
904 for( j=0; j<dataPointShape[1]; j++ )
905 for( k=0; k<dataPointShape[2]; k++ )
906 for( l=0; l<dataPointShape[3]; l++)
907 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
908 break;
909 }
910 }
911 //
912 // return the array
913 return numArray;
914
915 }
916
917 void
918 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
919 {
920 // this will throw if the value cannot be represented
921 boost::python::numeric::array num_array(py_object);
922 setValueOfDataPointToArray(dataPointNo,num_array);
923 }
924
925 void
926 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
927 {
928 if (isProtected()) {
929 throw DataException("Error - attempt to update protected Data object.");
930 }
931 FORCERESOLVE;
932 //
933 // check rank
934 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
935 throw DataException("Rank of numarray does not match Data object rank");
936
937 //
938 // check shape of num_array
939 for (unsigned int i=0; i<getDataPointRank(); i++) {
940 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
941 throw DataException("Shape of numarray does not match Data object rank");
942 }
943 //
944 // make sure data is expanded:
945 //
946 if (!isExpanded()) {
947 expand();
948 }
949 if (getNumDataPointsPerSample()>0) {
950 int sampleNo = dataPointNo/getNumDataPointsPerSample();
951 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
952 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
953 } else {
954 m_data->copyToDataPoint(-1, 0,num_array);
955 }
956 }
957
958 void
959 Data::setValueOfDataPoint(int dataPointNo, const double value)
960 {
961 if (isProtected()) {
962 throw DataException("Error - attempt to update protected Data object.");
963 }
964 //
965 // make sure data is expanded:
966 FORCERESOLVE;
967 if (!isExpanded()) {
968 expand();
969 }
970 if (getNumDataPointsPerSample()>0) {
971 int sampleNo = dataPointNo/getNumDataPointsPerSample();
972 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
973 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
974 } else {
975 m_data->copyToDataPoint(-1, 0,value);
976 }
977 }
978
979 const
980 boost::python::numeric::array
981 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
982 {
983 size_t length=0;
984 int i, j, k, l, pos;
985 FORCERESOLVE;
986 //
987 // determine the rank and shape of each data point
988 int dataPointRank = getDataPointRank();
989 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
990
991 //
992 // create the numeric array to be returned
993 boost::python::numeric::array numArray(0.0);
994
995 //
996 // the shape of the returned numeric array will be the same
997 // as that of the data point
998 int arrayRank = dataPointRank;
999 const DataTypes::ShapeType& arrayShape = dataPointShape;
1000
1001 //
1002 // resize the numeric array to the shape just calculated
1003 if (arrayRank==0) {
1004 numArray.resize(1);
1005 }
1006 if (arrayRank==1) {
1007 numArray.resize(arrayShape[0]);
1008 }
1009 if (arrayRank==2) {
1010 numArray.resize(arrayShape[0],arrayShape[1]);
1011 }
1012 if (arrayRank==3) {
1013 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
1014 }
1015 if (arrayRank==4) {
1016 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
1017 }
1018
1019 // added for the MPI communication
1020 length=1;
1021 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1022 double *tmpData = new double[length];
1023
1024 //
1025 // load the values for the data point into the numeric array.
1026
1027 // updated for the MPI case
1028 if( get_MPIRank()==procNo ){
1029 if (getNumDataPointsPerSample()>0) {
1030 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1031 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1032 //
1033 // Check a valid sample number has been supplied
1034 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1035 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1036 }
1037
1038 //
1039 // Check a valid data point number has been supplied
1040 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1041 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1042 }
1043 // TODO: global error handling
1044 // create a view of the data if it is stored locally
1045 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1046 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1047
1048 // pack the data from the view into tmpData for MPI communication
1049 pos=0;
1050 switch( dataPointRank ){
1051 case 0 :
1052 tmpData[0] = getDataAtOffset(offset);
1053 break;
1054 case 1 :
1055 for( i=0; i<dataPointShape[0]; i++ )
1056 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1057 break;
1058 case 2 :
1059 for( i=0; i<dataPointShape[0]; i++ )
1060 for( j=0; j<dataPointShape[1]; j++, pos++ )
1061 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1062 break;
1063 case 3 :
1064 for( i=0; i<dataPointShape[0]; i++ )
1065 for( j=0; j<dataPointShape[1]; j++ )
1066 for( k=0; k<dataPointShape[2]; k++, pos++ )
1067 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1068 break;
1069 case 4 :
1070 for( i=0; i<dataPointShape[0]; i++ )
1071 for( j=0; j<dataPointShape[1]; j++ )
1072 for( k=0; k<dataPointShape[2]; k++ )
1073 for( l=0; l<dataPointShape[3]; l++, pos++ )
1074 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1075 break;
1076 }
1077 }
1078 }
1079 #ifdef PASO_MPI
1080 // broadcast the data to all other processes
1081 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1082 #endif
1083
1084 // unpack the data
1085 switch( dataPointRank ){
1086 case 0 :
1087 numArray[0]=tmpData[0];
1088 break;
1089 case 1 :
1090 for( i=0; i<dataPointShape[0]; i++ )
1091 numArray[i]=tmpData[i];
1092 break;
1093 case 2 :
1094 for( i=0; i<dataPointShape[0]; i++ )
1095 for( j=0; j<dataPointShape[1]; j++ )
1096 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1097 break;
1098 case 3 :
1099 for( i=0; i<dataPointShape[0]; i++ )
1100 for( j=0; j<dataPointShape[1]; j++ )
1101 for( k=0; k<dataPointShape[2]; k++ )
1102 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1103 break;
1104 case 4 :
1105 for( i=0; i<dataPointShape[0]; i++ )
1106 for( j=0; j<dataPointShape[1]; j++ )
1107 for( k=0; k<dataPointShape[2]; k++ )
1108 for( l=0; l<dataPointShape[3]; l++ )
1109 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1110 break;
1111 }
1112
1113 delete [] tmpData;
1114 //
1115 // return the loaded array
1116 return numArray;
1117 }
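// Note: under PASO_MPI the value is broadcast (MPI_Bcast) from procNo to every rank,
// so getValueOfGlobalDataPoint is a collective operation and must be called on all ranks.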
1118
1119
1120 boost::python::numeric::array
1121 Data::integrate_const() const
1122 {
1123 if (isLazy())
1124 {
1125 throw DataException("Error - cannot integrate for constant lazy data.");
1126 }
1127 return integrateWorker();
1128 }
1129
1130 boost::python::numeric::array
1131 Data::integrate()
1132 {
1133 if (isLazy())
1134 {
1135 expand();
1136 }
1137 return integrateWorker();
1138 }
1139
1140
1141
1142 boost::python::numeric::array
1143 Data::integrateWorker() const
1144 {
1145 int index;
1146 int rank = getDataPointRank();
1147 DataTypes::ShapeType shape = getDataPointShape();
1148 int dataPointSize = getDataPointSize();
1149
1150 //
1151 // calculate the integral values
1152 vector<double> integrals(dataPointSize);
1153 vector<double> integrals_local(dataPointSize);
1154 #ifdef PASO_MPI
1155 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals_local,*this);
1156 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1157 double *tmp = new double[dataPointSize];
1158 double *tmp_local = new double[dataPointSize];
1159 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1160 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1161 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1162 delete[] tmp;
1163 delete[] tmp_local;
1164 #else
1165 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals,*this);
1166 #endif
1167
1168 //
1169 // create the numeric array to be returned
1170 // and load the array with the integral values
1171 boost::python::numeric::array bp_array(1.0);
1172 if (rank==0) {
1173 bp_array.resize(1);
1174 index = 0;
1175 bp_array[0] = integrals[index];
1176 }
1177 if (rank==1) {
1178 bp_array.resize(shape[0]);
1179 for (int i=0; i<shape[0]; i++) {
1180 index = i;
1181 bp_array[i] = integrals[index];
1182 }
1183 }
1184 if (rank==2) {
1185 bp_array.resize(shape[0],shape[1]);
1186 for (int i=0; i<shape[0]; i++) {
1187 for (int j=0; j<shape[1]; j++) {
1188 index = i + shape[0] * j;
1189 bp_array[make_tuple(i,j)] = integrals[index];
1190 }
1191 }
1192 }
1193 if (rank==3) {
1194 bp_array.resize(shape[0],shape[1],shape[2]);
1195 for (int i=0; i<shape[0]; i++) {
1196 for (int j=0; j<shape[1]; j++) {
1197 for (int k=0; k<shape[2]; k++) {
1198 index = i + shape[0] * ( j + shape[1] * k );
1199 bp_array[make_tuple(i,j,k)] = integrals[index];
1200 }
1201 }
1202 }
1203 }
1204 if (rank==4) {
1205 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1206 for (int i=0; i<shape[0]; i++) {
1207 for (int j=0; j<shape[1]; j++) {
1208 for (int k=0; k<shape[2]; k++) {
1209 for (int l=0; l<shape[3]; l++) {
1210 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1211 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1212 }
1213 }
1214 }
1215 }
1216 }
1217
1218 //
1219 // return the loaded array
1220 return bp_array;
1221 }
1222
1223 Data
1224 Data::sin() const
1225 {
1226 if (isLazy())
1227 {
1228 DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
1229 return Data(c);
1230 }
1231 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1232 }
1233
1234 Data
1235 Data::cos() const
1236 {
1237 if (isLazy())
1238 {
1239 DataLazy* c=new DataLazy(borrowDataPtr(),COS);
1240 return Data(c);
1241 }
1242 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1243 }
1244
1245 Data
1246 Data::tan() const
1247 {
1248 if (isLazy())
1249 {
1250 DataLazy* c=new DataLazy(borrowDataPtr(),TAN);
1251 return Data(c);
1252 }
1253 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1254 }
1255
1256 Data
1257 Data::asin() const
1258 {
1259 if (isLazy())
1260 {
1261 DataLazy* c=new DataLazy(borrowDataPtr(),ASIN);
1262 return Data(c);
1263 }
1264 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1265 }
1266
1267 Data
1268 Data::acos() const
1269 {
1270 if (isLazy())
1271 {
1272 DataLazy* c=new DataLazy(borrowDataPtr(),ACOS);
1273 return Data(c);
1274 }
1275 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1276 }
1277
1278
1279 Data
1280 Data::atan() const
1281 {
1282 if (isLazy())
1283 {
1284 DataLazy* c=new DataLazy(borrowDataPtr(),ATAN);
1285 return Data(c);
1286 }
1287 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1288 }
1289
1290 Data
1291 Data::sinh() const
1292 {
1293 if (isLazy())
1294 {
1295 DataLazy* c=new DataLazy(borrowDataPtr(),SINH);
1296 return Data(c);
1297 }
1298 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1299 }
1300
1301 Data
1302 Data::cosh() const
1303 {
1304 if (isLazy())
1305 {
1306 DataLazy* c=new DataLazy(borrowDataPtr(),COSH);
1307 return Data(c);
1308 }
1309 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1310 }
1311
1312 Data
1313 Data::tanh() const
1314 {
1315 if (isLazy())
1316 {
1317 DataLazy* c=new DataLazy(borrowDataPtr(),TANH);
1318 return Data(c);
1319 }
1320 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1321 }
1322
1323
1324 Data
1325 Data::erf() const
1326 {
1327 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1328 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1329 #else
1330 if (isLazy())
1331 {
1332 DataLazy* c=new DataLazy(borrowDataPtr(),ERF);
1333 return Data(c);
1334 }
1335 return C_TensorUnaryOperation(*this, ::erf);
1336 #endif
1337 }
1338
1339 Data
1340 Data::asinh() const
1341 {
1342 if (isLazy())
1343 {
1344 DataLazy* c=new DataLazy(borrowDataPtr(),ASINH);
1345 return Data(c);
1346 }
1347 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1348 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1349 #else
1350 return C_TensorUnaryOperation(*this, ::asinh);
1351 #endif
1352 }
1353
1354 Data
1355 Data::acosh() const
1356 {
1357 if (isLazy())
1358 {
1359 DataLazy* c=new DataLazy(borrowDataPtr(),ACOSH);
1360 return Data(c);
1361 }
1362 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1363 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1364 #else
1365 return C_TensorUnaryOperation(*this, ::acosh);
1366 #endif
1367 }
1368
1369 Data
1370 Data::atanh() const
1371 {
1372 if (isLazy())
1373 {
1374 DataLazy* c=new DataLazy(borrowDataPtr(),ATANH);
1375 return Data(c);
1376 }
1377 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1378 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1379 #else
1380 return C_TensorUnaryOperation(*this, ::atanh);
1381 #endif
1382 }
1383
1384 Data
1385 Data::log10() const
1386 { if (isLazy())
1387 {
1388 DataLazy* c=new DataLazy(borrowDataPtr(),LOG10);
1389 return Data(c);
1390 }
1391 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1392 }
1393
1394 Data
1395 Data::log() const
1396 {
1397 if (isLazy())
1398 {
1399 DataLazy* c=new DataLazy(borrowDataPtr(),LOG);
1400 return Data(c);
1401 }
1402 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1403 }
1404
1405 Data
1406 Data::sign() const
1407 {
1408 if (isLazy())
1409 {
1410 DataLazy* c=new DataLazy(borrowDataPtr(),SIGN);
1411 return Data(c);
1412 }
1413 return C_TensorUnaryOperation(*this, escript::fsign);
1414 }
1415
1416 Data
1417 Data::abs() const
1418 {
1419 if (isLazy())
1420 {
1421 DataLazy* c=new DataLazy(borrowDataPtr(),ABS);
1422 return Data(c);
1423 }
1424 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1425 }
1426
1427 Data
1428 Data::neg() const
1429 {
1430 if (isLazy())
1431 {
1432 DataLazy* c=new DataLazy(borrowDataPtr(),NEG);
1433 return Data(c);
1434 }
1435 return C_TensorUnaryOperation(*this, negate<double>());
1436 }
1437
1438 Data
1439 Data::pos() const
1440 {
1441 // Not doing a lazy check here is deliberate.
1442 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1443 Data result;
1444 // perform a deep copy
1445 result.copy(*this);
1446 return result;
1447 }
1448
1449 Data
1450 Data::exp() const
1451 {
1452 if (isLazy())
1453 {
1454 DataLazy* c=new DataLazy(borrowDataPtr(),EXP);
1455 return Data(c);
1456 }
1457 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1458 }
1459
1460 Data
1461 Data::sqrt() const
1462 {
1463 if (isLazy())
1464 {
1465 DataLazy* c=new DataLazy(borrowDataPtr(),SQRT);
1466 return Data(c);
1467 }
1468 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1469 }
1470
1471 double
1472 Data::Lsup_const() const
1473 {
1474 if (isLazy())
1475 {
1476 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1477 }
1478 return LsupWorker();
1479 }
1480
1481 double
1482 Data::Lsup()
1483 {
1484 if (isLazy())
1485 {
1486 expand();
1487 }
1488 return LsupWorker();
1489 }
1490
1491 double
1492 Data::sup_const() const
1493 {
1494 if (isLazy())
1495 {
1496 throw DataException("Error - cannot compute sup for constant lazy data.");
1497 }
1498 return supWorker();
1499 }
1500
1501 double
1502 Data::sup()
1503 {
1504 if (isLazy())
1505 {
1506 expand();
1507 }
1508 return supWorker();
1509 }
1510
1511 double
1512 Data::inf_const() const
1513 {
1514 if (isLazy())
1515 {
1516 throw DataException("Error - cannot compute inf for constant lazy data.");
1517 }
1518 return infWorker();
1519 }
1520
1521 double
1522 Data::inf()
1523 {
1524 if (isLazy())
1525 {
1526 expand();
1527 }
1528 return infWorker();
1529 }
1530
1531 double
1532 Data::LsupWorker() const
1533 {
1534 double localValue;
1535 //
1536 // set the initial absolute maximum value to zero
1537
1538 AbsMax abs_max_func;
1539 localValue = algorithm(abs_max_func,0);
1540 #ifdef PASO_MPI
1541 double globalValue;
1542 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1543 return globalValue;
1544 #else
1545 return localValue;
1546 #endif
1547 }
1548
1549 double
1550 Data::supWorker() const
1551 {
1552 double localValue;
1553 //
1554 // set the initial maximum value to min possible double
1555 FMax fmax_func;
1556 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1557 #ifdef PASO_MPI
1558 double globalValue;
1559 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1560 return globalValue;
1561 #else
1562 return localValue;
1563 #endif
1564 }
1565
1566 double
1567 Data::infWorker() const
1568 {
1569 double localValue;
1570 //
1571 // set the initial minimum value to max possible double
1572 FMin fmin_func;
1573 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1574 #ifdef PASO_MPI
1575 double globalValue;
1576 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1577 return globalValue;
1578 #else
1579 return localValue;
1580 #endif
1581 }
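// Note: under PASO_MPI the three workers above finish with an MPI_Allreduce, so
// Lsup(), sup() and inf() are collective operations and must be called on all ranks.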
1582
1583 /* TODO */
1584 /* global reduction */
1585 Data
1586 Data::maxval() const
1587 {
1588 if (isLazy())
1589 {
1590 Data temp(*this); // to get around the fact that you can't resolve a const Data
1591 temp.resolve();
1592 return temp.maxval();
1593 }
1594 //
1595 // set the initial maximum value to min possible double
1596 FMax fmax_func;
1597 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1598 }
1599
1600 Data
1601 Data::minval() const
1602 {
1603 if (isLazy())
1604 {
1605 Data temp(*this); // to get around the fact that you can't resolve a const Data
1606 temp.resolve();
1607 return temp.minval();
1608 }
1609 //
1610 // set the initial minimum value to max possible double
1611 FMin fmin_func;
1612 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1613 }
1614
1615 Data
1616 Data::swapaxes(const int axis0, const int axis1) const
1617 {
1618 int axis0_tmp,axis1_tmp;
1619 DataTypes::ShapeType s=getDataPointShape();
1620 DataTypes::ShapeType ev_shape;
1621 // Build the output shape: the same as the input shape but with the
1622 // dimensions at axis0 and axis1 exchanged.
1623 int rank=getDataPointRank();
1624 if (rank<2) {
1625 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1626 }
1627 if (axis0<0 || axis0>rank-1) {
1628 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1629 }
1630 if (axis1<0 || axis1>rank-1) {
1631 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1632 }
1633 if (axis0 == axis1) {
1634 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1635 }
1636 if (axis0 > axis1) {
1637 axis0_tmp=axis1;
1638 axis1_tmp=axis0;
1639 } else {
1640 axis0_tmp=axis0;
1641 axis1_tmp=axis1;
1642 }
1643 for (int i=0; i<rank; i++) {
1644 if (i == axis0_tmp) {
1645 ev_shape.push_back(s[axis1_tmp]);
1646 } else if (i == axis1_tmp) {
1647 ev_shape.push_back(s[axis0_tmp]);
1648 } else {
1649 ev_shape.push_back(s[i]);
1650 }
1651 }
1652 Data ev(0.,ev_shape,getFunctionSpace());
1653 ev.typeMatchRight(*this);
1654 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1655 return ev;
1656
1657 }
1658
1659 Data
1660 Data::symmetric() const
1661 {
1662 // check input
1663 DataTypes::ShapeType s=getDataPointShape();
1664 if (getDataPointRank()==2) {
1665 if(s[0] != s[1])
1666 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1667 }
1668 else if (getDataPointRank()==4) {
1669 if(!(s[0] == s[2] && s[1] == s[3]))
1670 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1671 }
1672 else {
1673 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1674 }
1675 if (isLazy())
1676 {
1677 DataLazy* c=new DataLazy(borrowDataPtr(),SYM);
1678 return Data(c);
1679 }
1680 Data ev(0.,getDataPointShape(),getFunctionSpace());
1681 ev.typeMatchRight(*this);
1682 m_data->symmetric(ev.m_data.get());
1683 return ev;
1684 }
1685
1686 Data
1687 Data::nonsymmetric() const
1688 {
1689 if (isLazy())
1690 {
1691 DataLazy* c=new DataLazy(borrowDataPtr(),NSYM);
1692 return Data(c);
1693 }
1694 // check input
1695 DataTypes::ShapeType s=getDataPointShape();
1696 if (getDataPointRank()==2) {
1697 if(s[0] != s[1])
1698 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1699 DataTypes::ShapeType ev_shape;
1700 ev_shape.push_back(s[0]);
1701 ev_shape.push_back(s[1]);
1702 Data ev(0.,ev_shape,getFunctionSpace());
1703 ev.typeMatchRight(*this);
1704 m_data->nonsymmetric(ev.m_data.get());
1705 return ev;
1706 }
1707 else if (getDataPointRank()==4) {
1708 if(!(s[0] == s[2] && s[1] == s[3]))
1709 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1710 DataTypes::ShapeType ev_shape;
1711 ev_shape.push_back(s[0]);
1712 ev_shape.push_back(s[1]);
1713 ev_shape.push_back(s[2]);
1714 ev_shape.push_back(s[3]);
1715 Data ev(0.,ev_shape,getFunctionSpace());
1716 ev.typeMatchRight(*this);
1717 m_data->nonsymmetric(ev.m_data.get());
1718 return ev;
1719 }
1720 else {
1721 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1722 }
1723 }
1724
1725
1726 // Doing a lazy version of this would require some thought.
1727 // First it needs a parameter (which DataLazy doesn't support at the moment).
1728 // Second (although this does not apply to trace), we can't handle operations which return
1729 // multiple results (like eigenvalues_and_eigenvectors) or return values of different shapes to their input
1730 // (like eigenvalues).
1731 Data
1732 Data::trace(int axis_offset) const
1733 {
1734 if (isLazy())
1735 {
1736 Data temp(*this); // to get around the fact that you can't resolve a const Data
1737 temp.resolve();
1738 return temp.trace(axis_offset);
1739 }
1740 DataTypes::ShapeType s=getDataPointShape();
1741 if (getDataPointRank()==2) {
1742 DataTypes::ShapeType ev_shape;
1743 Data ev(0.,ev_shape,getFunctionSpace());
1744 ev.typeMatchRight(*this);
1745 m_data->trace(ev.m_data.get(), axis_offset);
1746 return ev;
1747 }
1748 if (getDataPointRank()==3) {
1749 DataTypes::ShapeType ev_shape;
1750 if (axis_offset==0) {
1751 int s2=s[2];
1752 ev_shape.push_back(s2);
1753 }
1754 else if (axis_offset==1) {
1755 int s0=s[0];
1756 ev_shape.push_back(s0);
1757 }
1758 Data ev(0.,ev_shape,getFunctionSpace());
1759 ev.typeMatchRight(*this);
1760 m_data->trace(ev.m_data.get(), axis_offset);
1761 return ev;
1762 }
1763 if (getDataPointRank()==4) {
1764 DataTypes::ShapeType ev_shape;
1765 if (axis_offset==0) {
1766 ev_shape.push_back(s[2]);
1767 ev_shape.push_back(s[3]);
1768 }
1769 else if (axis_offset==1) {
1770 ev_shape.push_back(s[0]);
1771 ev_shape.push_back(s[3]);
1772 }
1773 else if (axis_offset==2) {
1774 ev_shape.push_back(s[0]);
1775 ev_shape.push_back(s[1]);
1776 }
1777 Data ev(0.,ev_shape,getFunctionSpace());
1778 ev.typeMatchRight(*this);
1779 m_data->trace(ev.m_data.get(), axis_offset);
1780 return ev;
1781 }
1782 else {
1783 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1784 }
1785 }
1786
1787 Data
1788 Data::transpose(int axis_offset) const
1789 {
1790 if (isLazy())
1791 {
1792 Data temp(*this); // to get around the fact that you can't resolve a const Data
1793 temp.resolve();
1794 return temp.transpose(axis_offset);
1795 }
1796 DataTypes::ShapeType s=getDataPointShape();
1797 DataTypes::ShapeType ev_shape;
1798 // Here's the equivalent of Python s_out=s[axis_offset:]+s[:axis_offset],
1799 // which goes through all shape vector elements starting with axis_offset (wrapping around to 0 at index=rank)
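// e.g. for a data point shape (2,3,4) and axis_offset=1 the output shape becomes (3,4,2).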
1800 int rank=getDataPointRank();
1801 if (axis_offset<0 || axis_offset>rank) {
1802 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1803 }
1804 for (int i=0; i<rank; i++) {
1805 int index = (axis_offset+i)%rank;
1806 ev_shape.push_back(s[index]); // Append to new shape
1807 }
1808 Data ev(0.,ev_shape,getFunctionSpace());
1809 ev.typeMatchRight(*this);
1810 m_data->transpose(ev.m_data.get(), axis_offset);
1811 return ev;
1812 }
1813
1814 Data
1815 Data::eigenvalues() const
1816 {
1817 if (isLazy())
1818 {
1819 Data temp(*this); // to get around the fact that you can't resolve a const Data
1820 temp.resolve();
1821 return temp.eigenvalues();
1822 }
1823 // check input
1824 DataTypes::ShapeType s=getDataPointShape();
1825 if (getDataPointRank()!=2)
1826 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1827 if(s[0] != s[1])
1828 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1829 // create return
1830 DataTypes::ShapeType ev_shape(1,s[0]);
1831 Data ev(0.,ev_shape,getFunctionSpace());
1832 ev.typeMatchRight(*this);
1833 m_data->eigenvalues(ev.m_data.get());
1834 return ev;
1835 }
1836
1837 const boost::python::tuple
1838 Data::eigenvalues_and_eigenvectors(const double tol) const
1839 {
1840 if (isLazy())
1841 {
1842 Data temp(*this); // to get around the fact that you can't resolve a const Data
1843 temp.resolve();
1844 return temp.eigenvalues_and_eigenvectors(tol);
1845 }
1846 DataTypes::ShapeType s=getDataPointShape();
1847 if (getDataPointRank()!=2)
1848 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1849 if(s[0] != s[1])
1850 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1851 // create return
1852 DataTypes::ShapeType ev_shape(1,s[0]);
1853 Data ev(0.,ev_shape,getFunctionSpace());
1854 ev.typeMatchRight(*this);
1855 DataTypes::ShapeType V_shape(2,s[0]);
1856 Data V(0.,V_shape,getFunctionSpace());
1857 V.typeMatchRight(*this);
1858 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1859 return make_tuple(boost::python::object(ev),boost::python::object(V));
1860 }
1861
1862 const boost::python::tuple
1863 Data::minGlobalDataPoint() const
1864 {
1865 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() because boost::make_tuple
1866 // causes an abort (for unknown reasons) if there are OpenMP directives in the
1867 // surrounding function.
1868
1869 int DataPointNo;
1870 int ProcNo;
1871 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1872 return make_tuple(ProcNo,DataPointNo);
1873 }
1874
1875 void
1876 Data::calc_minGlobalDataPoint(int& ProcNo,
1877 int& DataPointNo) const
1878 {
1879 if (isLazy())
1880 {
1881 Data temp(*this); // to get around the fact that you can't resolve a const Data
1882 temp.resolve();
1883 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1884 }
1885 int i,j;
1886 int lowi=0,lowj=0;
1887 double min=numeric_limits<double>::max();
1888
1889 Data temp=minval();
1890
1891 int numSamples=temp.getNumSamples();
1892 int numDPPSample=temp.getNumDataPointsPerSample();
1893
1894 double next,local_min;
1895 int local_lowi=0,local_lowj=0;
1896
1897 #pragma omp parallel private(next,local_min,local_lowi,local_lowj)
1898 {
1899 local_min=min;
1900 #pragma omp for private(i,j) schedule(static)
1901 for (i=0; i<numSamples; i++) {
1902 for (j=0; j<numDPPSample; j++) {
1903 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1904 if (next<local_min) {
1905 local_min=next;
1906 local_lowi=i;
1907 local_lowj=j;
1908 }
1909 }
1910 }
1911 #pragma omp critical
1912 if (local_min<min) {
1913 min=local_min;
1914 lowi=local_lowi;
1915 lowj=local_lowj;
1916 }
1917 }
1918
1919 #ifdef PASO_MPI
1920 // determine the processor on which the minimum occurs
1921 next = temp.getDataPoint(lowi,lowj);
1922 int lowProc = 0;
1923 double *globalMins = new double[get_MPISize()+1];
1924 int error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1925
1926 if( get_MPIRank()==0 ){
1927 next = globalMins[lowProc];
1928 for( i=1; i<get_MPISize(); i++ )
1929 if( next>globalMins[i] ){
1930 lowProc = i;
1931 next = globalMins[i];
1932 }
1933 }
1934 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1935
1936 delete [] globalMins;
1937 ProcNo = lowProc;
1938 #else
1939 ProcNo = 0;
1940 #endif
1941 DataPointNo = lowj + lowi * numDPPSample;
1942 }
1943
1944 void
1945 Data::saveDX(std::string fileName) const
1946 {
1947 if (isEmpty())
1948 {
1949 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1950 }
1951 if (isLazy())
1952 {
1953 Data temp(*this); // to get around the fact that you can't resolve a const Data
1954 temp.resolve();
1955 temp.saveDX(fileName);
1956 return;
1957 }
1958 boost::python::dict args;
1959 args["data"]=boost::python::object(this);
1960 getDomain()->saveDX(fileName,args);
1961 return;
1962 }
1963
1964 void
1965 Data::saveVTK(std::string fileName) const
1966 {
1967 if (isEmpty())
1968 {
1969 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1970 }
1971 if (isLazy())
1972 {
1973 Data temp(*this); // to get around the fact that you can't resolve a const Data
1974 temp.resolve();
1975 temp.saveVTK(fileName);
1976 return;
1977 }
1978 boost::python::dict args;
1979 args["data"]=boost::python::object(this);
1980 getDomain()->saveVTK(fileName,args);
1981 return;
1982 }
1983
1984 Data&
1985 Data::operator+=(const Data& right)
1986 {
1987 if (isProtected()) {
1988 throw DataException("Error - attempt to update protected Data object.");
1989 }
1990 if (isLazy() || right.isLazy())
1991 {
1992 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
1993 m_data=c->getPtr();
1994 return (*this);
1995 }
1996 else
1997 {
1998 binaryOp(right,plus<double>());
1999 return (*this);
2000 }
2001 }
2002
2003 Data&
2004 Data::operator+=(const boost::python::object& right)
2005 {
2006 if (isProtected()) {
2007 throw DataException("Error - attempt to update protected Data object.");
2008 }
2009 Data tmp(right,getFunctionSpace(),false);
2010 if (isLazy())
2011 {
2012 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
2013 m_data=c->getPtr();
2014 return (*this);
2015 }
2016 else
2017 {
2018 binaryOp(tmp,plus<double>());
2019 return (*this);
2020 }
2021 }
2022
2023 // Hmmm, operator= makes a deep copy but the copy constructor does not?
2024 Data&
2025 Data::operator=(const Data& other)
2026 {
2027 copy(other);
2028 return (*this);
2029 }
2030
2031 Data&
2032 Data::operator-=(const Data& right)
2033 {
2034 if (isProtected()) {
2035 throw DataException("Error - attempt to update protected Data object.");
2036 }
2037 if (isLazy() || right.isLazy())
2038 {
2039 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2040 m_data=c->getPtr();
2041 return (*this);
2042 }
2043 else
2044 {
2045 binaryOp(right,minus<double>());
2046 return (*this);
2047 }
2048 }
2049
2050 Data&
2051 Data::operator-=(const boost::python::object& right)
2052 {
2053 if (isProtected()) {
2054 throw DataException("Error - attempt to update protected Data object.");
2055 }
2056 Data tmp(right,getFunctionSpace(),false);
2057 if (isLazy())
2058 {
2059 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2060 m_data=c->getPtr();
2061 return (*this);
2062 }
2063 else
2064 {
2065 binaryOp(tmp,minus<double>());
2066 return (*this);
2067 }
2068 }
2069
2070 Data&
2071 Data::operator*=(const Data& right)
2072 {
2073 if (isProtected()) {
2074 throw DataException("Error - attempt to update protected Data object.");
2075 }
2076 if (isLazy() || right.isLazy())
2077 {
2078 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2079 m_data=c->getPtr();
2080 return (*this);
2081 }
2082 else
2083 {
2084 binaryOp(right,multiplies<double>());
2085 return (*this);
2086 }
2087 }
2088
2089 Data&
2090 Data::operator*=(const boost::python::object& right)
2091 {
2092 if (isProtected()) {
2093 throw DataException("Error - attempt to update protected Data object.");
2094 }
2095 Data tmp(right,getFunctionSpace(),false);
2096 if (isLazy())
2097 {
2098 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2099 m_data=c->getPtr();
2100 return (*this);
2101 }
2102 else
2103 {
2104 binaryOp(tmp,multiplies<double>());
2105 return (*this);
2106 }
2107 }
2108
2109 Data&
2110 Data::operator/=(const Data& right)
2111 {
2112 if (isProtected()) {
2113 throw DataException("Error - attempt to update protected Data object.");
2114 }
2115 if (isLazy() || right.isLazy())
2116 {
2117 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2118 m_data=c->getPtr();
2119 return (*this);
2120 }
2121 else
2122 {
2123 binaryOp(right,divides<double>());
2124 return (*this);
2125 }
2126 }
2127
2128 Data&
2129 Data::operator/=(const boost::python::object& right)
2130 {
2131 if (isProtected()) {
2132 throw DataException("Error - attempt to update protected Data object.");
2133 }
2134 Data tmp(right,getFunctionSpace(),false);
2135 if (isLazy())
2136 {
2137 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2138 m_data=c->getPtr();
2139 return (*this);
2140 }
2141 else
2142 {
2143 binaryOp(tmp,divides<double>());
2144 return (*this);
2145 }
2146 }
2147
2148 Data
2149 Data::rpowO(const boost::python::object& left) const
2150 {
2151 Data left_d(left,*this);
2152 return left_d.powD(*this);
2153 }
2154
2155 Data
2156 Data::powO(const boost::python::object& right) const
2157 {
2158 Data tmp(right,getFunctionSpace(),false);
2159 return powD(tmp);
2160 }
2161
2162 Data
2163 Data::powD(const Data& right) const
2164 {
2165 if (isLazy() || right.isLazy())
2166 {
2167 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),POW);
2168 return Data(c);
2169 }
2170 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2171 }
2172
2173 //
2174 // NOTE: It is essential to specify the namespace this operator belongs to
2175 Data
2176 escript::operator+(const Data& left, const Data& right)
2177 {
2178 if (left.isLazy() || right.isLazy())
2179 {
2180 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),ADD);
2181 return Data(c);
2182 }
2183 return C_TensorBinaryOperation(left, right, plus<double>());
2184 }
2185
2186 //
2187 // NOTE: It is essential to specify the namespace this operator belongs to
2188 Data
2189 escript::operator-(const Data& left, const Data& right)
2190 {
2191 if (left.isLazy() || right.isLazy())
2192 {
2193 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),SUB);
2194 return Data(c);
2195 }
2196 return C_TensorBinaryOperation(left, right, minus<double>());
2197 }
2198
2199 //
2200 // NOTE: It is essential to specify the namespace this operator belongs to
2201 Data
2202 escript::operator*(const Data& left, const Data& right)
2203 {
2204 if (left.isLazy() || right.isLazy())
2205 {
2206 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),MUL);
2207 return Data(c);
2208 }
2209 return C_TensorBinaryOperation(left, right, multiplies<double>());
2210 }
2211
2212 //
2213 // NOTE: It is essential to specify the namespace this operator belongs to
2214 Data
2215 escript::operator/(const Data& left, const Data& right)
2216 {
2217 if (left.isLazy() || right.isLazy())
2218 {
2219 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),DIV);
2220 return Data(c);
2221 }
2222 return C_TensorBinaryOperation(left, right, divides<double>());
2223 }
2224
2225 //
2226 // NOTE: It is essential to specify the namespace this operator belongs to
2227 Data
2228 escript::operator+(const Data& left, const boost::python::object& right)
2229 {
2230 if (left.isLazy())
2231 {
2232 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),ADD);
2233 return Data(c);
2234 }
2235 return left+Data(right,left.getFunctionSpace(),false);
2236 }
2237
2238 //
2239 // NOTE: It is essential to specify the namespace this operator belongs to
2240 Data
2241 escript::operator-(const Data& left, const boost::python::object& right)
2242 {
2243 if (left.isLazy())
2244 {
2245 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),SUB);
2246 return Data(c);
2247 }
2248 return left-Data(right,left.getFunctionSpace(),false);
2249 }
2250
2251 //
2252 // NOTE: It is essential to specify the namespace this operator belongs to
2253 Data
2254 escript::operator*(const Data& left, const boost::python::object& right)
2255 {
2256 if (left.isLazy())
2257 {
2258 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),MUL);
2259 return Data(c);
2260 }
2261 return left*Data(right,left.getFunctionSpace(),false);
2262 }
2263
2264 //
2265 // NOTE: It is essential to specify the namespace this operator belongs to
2266 Data
2267 escript::operator/(const Data& left, const boost::python::object& right)
2268 {
2269 if (left.isLazy())
2270 {
2271 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),DIV);
2272 return Data(c);
2273 }
2274 return left/Data(right,left.getFunctionSpace(),false);
2275 }
2276
2277 //
2278 // NOTE: It is essential to specify the namespace this operator belongs to
2279 Data
2280 escript::operator+(const boost::python::object& left, const Data& right)
2281 {
2282 if (right.isLazy())
2283 {
2284 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),ADD);
2285 return Data(c);
2286 }
2287 return Data(left,right.getFunctionSpace(),false)+right;
2288 }
2289
2290 //
2291 // NOTE: It is essential to specify the namespace this operator belongs to
2292 Data
2293 escript::operator-(const boost::python::object& left, const Data& right)
2294 {
2295 if (right.isLazy())
2296 {
2297 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),SUB);
2298 return Data(c);
2299 }
2300 return Data(left,right.getFunctionSpace(),false)-right;
2301 }
2302
2303 //
2304 // NOTE: It is essential to specify the namespace this operator belongs to
2305 Data
2306 escript::operator*(const boost::python::object& left, const Data& right)
2307 {
2308 if (right.isLazy())
2309 {
2310 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),MUL);
2311 return Data(c);
2312 }
2313 return Data(left,right.getFunctionSpace(),false)*right;
2314 }
2315
2316 //
2317 // NOTE: It is essential to specify the namespace this operator belongs to
2318 Data
2319 escript::operator/(const boost::python::object& left, const Data& right)
2320 {
2321 if (right.isLazy())
2322 {
2323 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),DIV);
2324 return Data(c);
2325 }
2326 return Data(left,right.getFunctionSpace(),false)/right;
2327 }
2328
2329
2330 /* TODO */
2331 /* global reduction */
2332 Data
2333 Data::getItem(const boost::python::object& key) const
2334 {
2335
2336 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2337
2338 if (slice_region.size()!=getDataPointRank()) {
2339 throw DataException("Error - slice size does not match Data rank.");
2340 }
2341
2342 return getSlice(slice_region);
2343 }
2344
2345 /* TODO */
2346 /* global reduction */
2347 Data
2348 Data::getSlice(const DataTypes::RegionType& region) const
2349 {
2350 return Data(*this,region);
2351 }
2352
2353 /* TODO */
2354 /* global reduction */
2355 void
2356 Data::setItemO(const boost::python::object& key,
2357 const boost::python::object& value)
2358 {
2359 Data tempData(value,getFunctionSpace());
2360 setItemD(key,tempData);
2361 }
2362
2363 void
2364 Data::setItemD(const boost::python::object& key,
2365 const Data& value)
2366 {
2367 // const DataArrayView& view=getPointDataView();
2368
2369 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2370 if (slice_region.size()!=getDataPointRank()) {
2371 throw DataException("Error - slice size does not match Data rank.");
2372 }
2373 if (getFunctionSpace()!=value.getFunctionSpace()) {
2374 setSlice(Data(value,getFunctionSpace()),slice_region);
2375 } else {
2376 setSlice(value,slice_region);
2377 }
2378 }
2379
2380 void
2381 Data::setSlice(const Data& value,
2382 const DataTypes::RegionType& region)
2383 {
2384 if (isProtected()) {
2385 throw DataException("Error - attempt to update protected Data object.");
2386 }
2387 FORCERESOLVE;
2388 /* if (isLazy())
2389 {
2390 throw DataException("Error - setSlice not permitted on lazy data.");
2391 }*/
2392 Data tempValue(value);
2393 typeMatchLeft(tempValue);
2394 typeMatchRight(tempValue);
2395 getReady()->setSlice(tempValue.m_data.get(),region);
2396 }
2397
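// Promote 'right' so its representation is at least as general as this object's:
// expanded data forces expansion; tagged data promotes constant input to tagged.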
2398 void
2399 Data::typeMatchLeft(Data& right) const
2400 {
2401 if (right.isLazy() && !isLazy())
2402 {
2403 right.resolve();
2404 }
2405 if (isExpanded()){
2406 right.expand();
2407 } else if (isTagged()) {
2408 if (right.isConstant()) {
2409 right.tag();
2410 }
2411 }
2412 }
2413
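// Promote this object so its representation is at least as general as 'right':
// expand if right is expanded, otherwise tag constant data if right is tagged.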
2414 void
2415 Data::typeMatchRight(const Data& right)
2416 {
2417 if (isLazy() && !right.isLazy())
2418 {
2419 resolve();
2420 }
2421 if (isTagged()) {
2422 if (right.isExpanded()) {
2423 expand();
2424 }
2425 } else if (isConstant()) {
2426 if (right.isExpanded()) {
2427 expand();
2428 } else if (right.isTagged()) {
2429 tag();
2430 }
2431 }
2432 }
2433
2434 void
2435 Data::setTaggedValueByName(std::string name,
2436 const boost::python::object& value)
2437 {
2438 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2439 FORCERESOLVE;
2440 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2441 setTaggedValue(tagKey,value);
2442 }
2443 }
2444 void
2445 Data::setTaggedValue(int tagKey,
2446 const boost::python::object& value)
2447 {
2448 if (isProtected()) {
2449 throw DataException("Error - attempt to update protected Data object.");
2450 }
2451 //
2452 // Ensure underlying data object is of type DataTagged
2453 FORCERESOLVE;
2454 if (isConstant()) tag();
2455 numeric::array asNumArray(value);
2456
2457 // extract the shape of the numarray
2458 DataTypes::ShapeType tempShape;
2459 for (int i=0; i < asNumArray.getrank(); i++) {
2460 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2461 }
2462
2463 DataVector temp_data2;
2464 temp_data2.copyFromNumArray(asNumArray);
2465
2466 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2467 }
2468
2469
2470 void
2471 Data::setTaggedValueFromCPP(int tagKey,
2472 const DataTypes::ShapeType& pointshape,
2473 const DataTypes::ValueType& value,
2474 int dataOffset)
2475 {
2476 if (isProtected()) {
2477 throw DataException("Error - attempt to update protected Data object.");
2478 }
2479 //
2480 // Ensure underlying data object is of type DataTagged
2481 FORCERESOLVE;
2482 if (isConstant()) tag();
2483 //
2484 // Call DataAbstract::setTaggedValue
2485 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2486 }
2487
2488 int
2489 Data::getTagNumber(int dpno)
2490 {
2491 if (isEmpty())
2492 {
2493 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2494 }
2495 return getFunctionSpace().getTagFromDataPointNo(dpno);
2496 }
2497
2498
2499 ostream& escript::operator<<(ostream& o, const Data& data)
2500 {
2501 o << data.toString();
2502 return o;
2503 }
2504
2505 Data
2506 escript::C_GeneralTensorProduct(Data& arg_0,
2507 Data& arg_1,
2508 int axis_offset,
2509 int transpose)
2510 {
2511 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2512 // SM is the product of the last axis_offset entries in arg_0.getShape().
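 // For illustration only: with axis_offset=1 and transpose=0, arg_0 of shape (3,4)
 // and arg_1 of shape (4,5) give SL=3, SM=4, SR=5 and a result of shape (3,5),
 // i.e. ordinary matrix-matrix multiplication.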
2513
2514 // deal with any lazy data
2515 // if (arg_0.isLazy()) {arg_0.resolve();}
2516 // if (arg_1.isLazy()) {arg_1.resolve();}
2517 if (arg_0.isLazy() || arg_1.isLazy())
2518 {
2519 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2520 return Data(c);
2521 }
2522
2523 // Interpolate if necessary and find an appropriate function space
2524 Data arg_0_Z, arg_1_Z;
2525 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2526 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2527 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2528 arg_1_Z = Data(arg_1);
2529 }
2530 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2531 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2532 arg_0_Z =Data(arg_0);
2533 }
2534 else {
2535 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2536 }
2537 } else {
2538 arg_0_Z = Data(arg_0);
2539 arg_1_Z = Data(arg_1);
2540 }
2541 // Get rank and shape of inputs
2542 int rank0 = arg_0_Z.getDataPointRank();
2543 int rank1 = arg_1_Z.getDataPointRank();
2544 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2545 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2546
2547 // Prepare for the loops of the product and verify compatibility of shapes
2548 int start0=0, start1=0;
2549 if (transpose == 0) {}
2550 else if (transpose == 1) { start0 = axis_offset; }
2551 else if (transpose == 2) { start1 = rank1-axis_offset; }
2552 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2553
2554
2555 // Adjust the shapes for transpose
2556 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2557 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2558 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2559 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
2560
2561 #if 0
2562 // For debugging: show shape after transpose
2563 char tmp[100];
2564 std::string shapeStr;
2565 shapeStr = "(";
2566 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2567 shapeStr += ")";
2568 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2569 shapeStr = "(";
2570 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2571 shapeStr += ")";
2572 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2573 #endif
2574
2575 // Prepare for the loops of the product
2576 int SL=1, SM=1, SR=1;
2577 for (int i=0; i<rank0-axis_offset; i++) {
2578 SL *= tmpShape0[i];
2579 }
2580 for (int i=rank0-axis_offset; i<rank0; i++) {
2581 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2582 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2583 }
2584 SM *= tmpShape0[i];
2585 }
2586 for (int i=axis_offset; i<rank1; i++) {
2587 SR *= tmpShape1[i];
2588 }
2589
2590 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2591 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2592 { // block to limit the scope of out_index
2593 int out_index=0;
2594 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2595 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2596 }
2597
2598 // Declare output Data object
2599 Data res;
2600
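 // Dispatch on the storage type of each operand (constant, tagged or expanded);
 // every case below reduces to matrix_matrix_product calls on the raw data pointers.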
2601 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2602 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2603 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2604 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2605 double *ptr_2 = &(res.getDataAtOffset(0));
2606 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2607 }
2608 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2609
2610 // Prepare the DataConstant input
2611 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2612 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2613
2614 // Borrow DataTagged input from Data object
2615 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2616 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2617
2618 // Prepare a DataTagged output 2
2619 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2620 res.tag();
2621 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2622 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2623
2624 // Prepare offset into DataConstant
2625 int offset_0 = tmp_0->getPointOffset(0,0);
2626 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2627 // Get the views
2628 // DataArrayView view_1 = tmp_1->getDefaultValue();
2629 // DataArrayView view_2 = tmp_2->getDefaultValue();
2630 // // Get the pointers to the actual data
2631 // double *ptr_1 = &((view_1.getData())[0]);
2632 // double *ptr_2 = &((view_2.getData())[0]);
2633
2634 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2635 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2636
2637
2638 // Compute a matrix product for the default value
2639 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2640 // Compute a matrix product for each tag
2641 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2642 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2643 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2644 tmp_2->addTag(i->first);
2645 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2646 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2647 // double *ptr_1 = &view_1.getData(0);
2648 // double *ptr_2 = &view_2.getData(0);
2649
2650 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2651 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2652
2653 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2654 }
2655
2656 }
2657 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2658
2659 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2660 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2661 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2662 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2663 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2664 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2665 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2666 int sampleNo_1,dataPointNo_1;
2667 int numSamples_1 = arg_1_Z.getNumSamples();
2668 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2669 int offset_0 = tmp_0->getPointOffset(0,0);
2670 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2671 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2672 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2673 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2674 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2675 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2676 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2677 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2678 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2679 }
2680 }
2681
2682 }
2683 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2684
2685 // Borrow DataTagged input from Data object
2686 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2687 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2688
2689 // Prepare the DataConstant input
2690 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2691 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2692
2693 // Prepare a DataTagged output 2
2694 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2695 res.tag();
2696 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2697 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2698
2699 // Prepare offset into DataConstant
2700 int offset_1 = tmp_1->getPointOffset(0,0);
2701 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2702 // Get the views
2703 // DataArrayView view_0 = tmp_0->getDefaultValue();
2704 // DataArrayView view_2 = tmp_2->getDefaultValue();
2705 // // Get the pointers to the actual data
2706 // double *ptr_0 = &((view_0.getData())[0]);
2707 // double *ptr_2 = &((view_2.getData())[0]);
2708
2709 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2710 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2711
2712 // Compute a matrix product for the default value
2713 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2714 // Compute a matrix product for each tag
2715 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2716 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2717 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2718 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2719 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2720 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2721 // double *ptr_0 = &view_0.getData(0);
2722 // double *ptr_2 = &view_2.getData(0);
2723
2724 tmp_2->addTag(i->first);
2725 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2726 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2727 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2728 }
2729
2730 }
2731 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2732
2733 // Borrow DataTagged input from Data object
2734 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2735 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2736
2737 // Borrow DataTagged input from Data object
2738 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2739 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2740
2741 // Prepare a DataTagged output 2
2742 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2743 res.tag(); // DataTagged output
2744 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2745 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2746
2747 // // Get the views
2748 // DataArrayView view_0 = tmp_0->getDefaultValue();
2749 // DataArrayView view_1 = tmp_1->getDefaultValue();
2750 // DataArrayView view_2 = tmp_2->getDefaultValue();
2751 // // Get the pointers to the actual data
2752 // double *ptr_0 = &((view_0.getData())[0]);
2753 // double *ptr_1 = &((view_1.getData())[0]);
2754 // double *ptr_2 = &((view_2.getData())[0]);
2755
2756 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2757 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2758 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2759
2760
2761 // Compute a matrix product for the default value
2762 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2763 // Merge the tags
2764 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2765 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2766 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2767 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2768 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2769 }
2770 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2771 tmp_2->addTag(i->first);
2772 }
2773 // Compute a matrix product for each tag
2774 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2775 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2776 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2777 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2778 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2779 // double *ptr_0 = &view_0.getData(0);
2780 // double *ptr_1 = &view_1.getData(0);
2781 // double *ptr_2 = &view_2.getData(0);
2782
2783 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2784 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2785 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2786
2787 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2788 }
2789
2790 }
2791 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2792
2793 // After finding a common function space above, the two inputs have the same numSamples and data points per sample
2794 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2795 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2796 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2797 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2798 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2799 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2800 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2801 int sampleNo_0,dataPointNo_0;
2802 int numSamples_0 = arg_0_Z.getNumSamples();
2803 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2804 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2805 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2806 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2807 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2808 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2809 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2810 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2811 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2812 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2813 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2814 }
2815 }
2816
2817 }
2818 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2819
2820 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2821 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2822 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2823 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2824 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2825 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2826 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2827 int sampleNo_0,dataPointNo_0;
2828 int numSamples_0 = arg_0_Z.getNumSamples();
2829 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2830 int offset_1 = tmp_1->getPointOffset(0,0);
2831 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2832 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2833 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2834 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2835 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2836 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2837 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2838 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2839 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2840 }
2841 }
2842
2843
2844 }
2845 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2846
2847 // After finding a common function space above, the two inputs have the same numSamples and data points per sample
2848 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2849 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2850 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2851 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2852 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2853 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2854 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2855 int sampleNo_0,dataPointNo_0;
2856 int numSamples_0 = arg_0_Z.getNumSamples();
2857 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2858 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2859 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2860 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2861 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2862 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2863 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2864 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2865 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2866 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2867 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2868 }
2869 }
2870
2871 }
2872 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2873
2874 // After finding a common function space above, the two inputs have the same numSamples and data points per sample
2875 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2876 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2877 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2878 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2879 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2880 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2881 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2882 int sampleNo_0,dataPointNo_0;
2883 int numSamples_0 = arg_0_Z.getNumSamples();
2884 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2885 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2886 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2887 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2888 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2889 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2890 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2891 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2892 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2893 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2894 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2895 }
2896 }
2897
2898 }
2899 else {
2900 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2901 }
2902
2903 return res;
2904 }
2905
2906 DataAbstract*
2907 Data::borrowData() const
2908 {
2909 return m_data.get();
2910 }
2911
2912 // Not all that happy about returning a non-const from a const
2913 DataAbstract_ptr
2914 Data::borrowDataPtr() const
2915 {
2916 return m_data;
2917 }
2918
2919 // Not all that happy about returning a non-const from a const
2920 DataReady_ptr
2921 Data::borrowReadyPtr() const
2922 {
2923 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2924 EsysAssert((dr!=0), "Error - casting to DataReady.");
2925 return dr;
2926 }
2927
2928 std::string
2929 Data::toString() const
2930 {
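 // For large ready (non-lazy) objects print only a summary of inf/sup and the
 // number of data points; the threshold is the TOO_MANY_LINES parameter.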
2931 if (!m_data->isEmpty() &&
2932 !m_data->isLazy() &&
2933 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2934 {
2935 stringstream temp;
2936 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2937 return temp.str();
2938 }
2939 return m_data->toString();
2940 }
2941
2942
2943
2944 DataTypes::ValueType::const_reference
2945 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2946 {
2947 if (isLazy())
2948 {
2949 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2950 }
2951 return getReady()->getDataAtOffset(i);
2952 }
2953
2954
2955 DataTypes::ValueType::reference
2956 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2957 {
2958 // if (isLazy())
2959 // {
2960 // throw DataException("getDataAtOffset not permitted on lazy data.");
2961 // }
2962 FORCERESOLVE;
2963 return getReady()->getDataAtOffset(i);
2964 }
2965
2966 DataTypes::ValueType::const_reference
2967 Data::getDataPoint(int sampleNo, int dataPointNo) const
2968 {
2969 if (!isReady())
2970 {
2971 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2972 }
2973 else
2974 {
2975 const DataReady* dr=getReady();
2976 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2977 }
2978 }
2979
2980
2981 DataTypes::ValueType::reference
2982 Data::getDataPoint(int sampleNo, int dataPointNo)
2983 {
2984 FORCERESOLVE;
2985 if (!isReady())
2986 {
2987 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2988 }
2989 else
2990 {
2991 DataReady* dr=getReady();
2992 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2993 }
2994 }
2995
2996
2997 /* Member functions specific to the MPI implementation */
2998
2999 void
3000 Data::print()
3001 {
3002 int i,j;
3003
3004 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
3005 for( i=0; i<getNumSamples(); i++ )
3006 {
3007 printf( "[%6d]", i );
3008 for( j=0; j<getNumDataPointsPerSample(); j++ )
3009 printf( "\t%10.7g", (getSampleData(i))[j] );
3010 printf( "\n" );
3011 }
3012 }
3013 void
3014 Data::dump(const std::string fileName) const
3015 {
3016 try
3017 {
3018 if (isLazy())
3019 {
3020 Data temp(*this); // this is to get a non-const object which we can resolve
3021 temp.resolve();
3022 temp.dump(fileName);
3023 }
3024 else
3025 {
3026 return m_data->dump(fileName);
3027 }
3028 }
3029 catch (exception& e)
3030 {
3031 cout << e.what() << endl;
3032 }
3033 }
3034
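// Without PASO_MPI these helpers fall back to single-process defaults
// (size 1, rank 0, no valid communicator).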
3035 int
3036 Data::get_MPISize() const
3037 {
3038 int size;
3039 #ifdef PASO_MPI
3040 int error;
3041 error = MPI_Comm_size( get_MPIComm(), &size );
3042 #else
3043 size = 1;
3044 #endif
3045 return size;
3046 }
3047
3048 int
3049 Data::get_MPIRank() const
3050 {
3051 int rank;
3052 #ifdef PASO_MPI
3053 int error;
3054 error = MPI_Comm_rank( get_MPIComm(), &rank );
3055 #else
3056 rank = 0;
3057 #endif
3058 return rank;
3059 }
3060
3061 MPI_Comm
3062 Data::get_MPIComm() const
3063 {
3064 #ifdef PASO_MPI
3065 return MPI_COMM_WORLD;
3066 #else
3067 return -1;
3068 #endif
3069 }
3070
3071
