
Contents of /trunk/escript/src/Data.cpp



Revision 2081
Fri Nov 21 01:28:31 2008 UTC by caltinay
File size: 86281 byte(s)
escript/Data: Another fix for parallel var initialization. Also, the (error) return value of MPI_Gather was not used. I applied the same 'hack' as in other places in the file, namely declaring the variable beforehand but still ignoring the return value :-/
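For reference, the pattern described in the log message appears further down in this listing; a minimal sketch (using the variables at that call site) is:

    int error;
    error = MPI_Gather( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
    // 'error' is declared and assigned but deliberately never checked.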

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36
37 #include <boost/python/dict.hpp>
38 #include <boost/python/extract.hpp>
39 #include <boost/python/long.hpp>
40
41 using namespace std;
42 using namespace boost::python;
43 using namespace boost;
44 using namespace escript;
45
46 // ensure the current object is not a DataLazy
47 // The idea was that we could add an optional warning whenever a resolve is forced
48 #define FORCERESOLVE if (isLazy()) {resolve();}
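// Usage note: methods that must hand back concrete values (e.g. getValueOfDataPoint and
// setValueOfDataPointToArray below) invoke FORCERESOLVE first, so lazy expressions are
// evaluated before any per-sample access.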
49
50 Data::Data()
51 {
52 //
53 // Default data is type DataEmpty
54 DataAbstract* temp=new DataEmpty();
55 m_data=temp->getPtr();
56 m_protected=false;
57 }
58
59 Data::Data(double value,
60 const tuple& shape,
61 const FunctionSpace& what,
62 bool expanded)
63 {
64 DataTypes::ShapeType dataPointShape;
65 for (int i = 0; i < shape.attr("__len__")(); ++i) {
66 dataPointShape.push_back(extract<const int>(shape[i]));
67 }
68
69 int len = DataTypes::noValues(dataPointShape);
70 DataVector temp_data(len,value,len);
71 initialise(temp_data, dataPointShape, what, expanded);
72 m_protected=false;
73 }
74
75 Data::Data(double value,
76 const DataTypes::ShapeType& dataPointShape,
77 const FunctionSpace& what,
78 bool expanded)
79 {
80 int len = DataTypes::noValues(dataPointShape);
81
82 DataVector temp_data(len,value,len);
83 // DataArrayView temp_dataView(temp_data, dataPointShape);
84
85 // initialise(temp_dataView, what, expanded);
86 initialise(temp_data, dataPointShape, what, expanded);
87
88 m_protected=false;
89 }
90
91 Data::Data(const Data& inData)
92 {
93 m_data=inData.m_data;
94 m_protected=inData.isProtected();
95 }
96
97
98 Data::Data(const Data& inData,
99 const DataTypes::RegionType& region)
100 {
101 DataAbstract_ptr dat=inData.m_data;
102 if (inData.isLazy())
103 {
104 dat=inData.m_data->resolve();
105 }
106 else
107 {
108 dat=inData.m_data;
109 }
110 //
111 // Create Data which is a slice of another Data
112 DataAbstract* tmp = dat->getSlice(region);
113 m_data=DataAbstract_ptr(tmp);
114 m_protected=false;
115 }
116
117 Data::Data(const Data& inData,
118 const FunctionSpace& functionspace)
119 {
120 if (inData.isEmpty())
121 {
122 throw DataException("Error - will not interpolate for instances of DataEmpty.");
123 }
124 if (inData.getFunctionSpace()==functionspace) {
125 m_data=inData.m_data;
126 }
127 else
128 {
129
130 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
131 if (!inData.probeInterpolation(functionspace))
132 { // Even though this is constant, we still need to check whether interpolation is allowed
133 throw FunctionSpaceException("Call to probeInterpolation returned false for DataConstant.");
134 }
135 // if the data is not lazy, this will just be a cast to DataReady
136 DataReady_ptr dr=inData.m_data->resolve();
137 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
138 m_data=DataAbstract_ptr(dc);
139 } else {
140 Data tmp(0,inData.getDataPointShape(),functionspace,true);
141 // Note: Must use a reference or pointer to a derived object
142 // in order to get polymorphic behaviour. Shouldn't really
143 // be able to create an instance of AbstractDomain but that was done
144 // as a boost::python workaround which may no longer be required.
145 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
146 const_Domain_ptr inDataDomain=inData.getDomain();
147 if (inDataDomain==functionspace.getDomain()) {
148 inDataDomain->interpolateOnDomain(tmp,inData);
149 } else {
150 inDataDomain->interpolateACross(tmp,inData);
151 }
152 m_data=tmp.m_data;
153 }
154 }
155 m_protected=false;
156 }
157
158 Data::Data(DataAbstract* underlyingdata)
159 {
160 // m_data=shared_ptr<DataAbstract>(underlyingdata);
161 m_data=underlyingdata->getPtr();
162 m_protected=false;
163 }
164
165 Data::Data(DataAbstract_ptr underlyingdata)
166 {
167 m_data=underlyingdata;
168 m_protected=false;
169 }
170
171
172 Data::Data(const numeric::array& value,
173 const FunctionSpace& what,
174 bool expanded)
175 {
176 initialise(value,what,expanded);
177 m_protected=false;
178 }
179 /*
180 Data::Data(const DataArrayView& value,
181 const FunctionSpace& what,
182 bool expanded)
183 {
184 initialise(value,what,expanded);
185 m_protected=false;
186 }*/
187
188 Data::Data(const DataTypes::ValueType& value,
189 const DataTypes::ShapeType& shape,
190 const FunctionSpace& what,
191 bool expanded)
192 {
193 initialise(value,shape,what,expanded);
194 m_protected=false;
195 }
196
197
198 Data::Data(const object& value,
199 const FunctionSpace& what,
200 bool expanded)
201 {
202 numeric::array asNumArray(value);
203 initialise(asNumArray,what,expanded);
204 m_protected=false;
205 }
206
207
208 Data::Data(const object& value,
209 const Data& other)
210 {
211 numeric::array asNumArray(value);
212
213 // extract the shape of the numarray
214 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
215 // /* for (int i=0; i < asNumArray.getrank(); i++) {
216 // tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
217 // }*/
218 // // get the space for the data vector
219 // int len = DataTypes::noValues(tempShape);
220 // DataVector temp_data(len, 0.0, len);
221 // /* DataArrayView temp_dataView(temp_data, tempShape);
222 // temp_dataView.copy(asNumArray);*/
223 // temp_data.copyFromNumArray(asNumArray);
224
225 //
226 // Create DataConstant using the given value and all other parameters
227 // copied from other. If value is a rank 0 object this Data
228 // will assume the point data shape of other.
229
230 if (DataTypes::getRank(tempShape)/*temp_dataView.getRank()*/==0) {
231
232
233 // get the space for the data vector
234 int len1 = DataTypes::noValues(tempShape);
235 DataVector temp_data(len1, 0.0, len1);
236 temp_data.copyFromNumArray(asNumArray);
237
238 int len = DataTypes::noValues(other.getDataPointShape());
239
240 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
241 //DataArrayView temp2_dataView(temp2_data, other.getPointDataView().getShape());
242 // initialise(temp2_dataView, other.getFunctionSpace(), false);
243
244 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
245 // boost::shared_ptr<DataAbstract> sp(t);
246 // m_data=sp;
247 m_data=DataAbstract_ptr(t);
248
249 } else {
250 //
251 // Create a DataConstant with the same sample shape as other
252 // initialise(temp_dataView, other.getFunctionSpace(), false);
253 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
254 // boost::shared_ptr<DataAbstract> sp(t);
255 // m_data=sp;
256 m_data=DataAbstract_ptr(t);
257 }
258 m_protected=false;
259 }
260
261 Data::~Data()
262 {
263
264 }
265
266
267
268 void
269 Data::initialise(const boost::python::numeric::array& value,
270 const FunctionSpace& what,
271 bool expanded)
272 {
273 //
274 // Construct a Data object of the appropriate type.
275 // Construct the object first as there seems to be a bug which causes
276 // undefined behaviour if an exception is thrown during construction
277 // within the shared_ptr constructor.
278 if (expanded) {
279 DataAbstract* temp=new DataExpanded(value, what);
280 // boost::shared_ptr<DataAbstract> temp_data(temp);
281 // m_data=temp_data;
282 m_data=temp->getPtr();
283 } else {
284 DataAbstract* temp=new DataConstant(value, what);
285 // boost::shared_ptr<DataAbstract> temp_data(temp);
286 // m_data=temp_data;
287 m_data=temp->getPtr();
288 }
289 }
290
291
292 void
293 Data::initialise(const DataTypes::ValueType& value,
294 const DataTypes::ShapeType& shape,
295 const FunctionSpace& what,
296 bool expanded)
297 {
298 //
299 // Construct a Data object of the appropriate type.
300 // Construct the object first as there seems to be a bug which causes
301 // undefined behaviour if an exception is thrown during construction
302 // within the shared_ptr constructor.
303 if (expanded) {
304 DataAbstract* temp=new DataExpanded(what, shape, value);
305 // boost::shared_ptr<DataAbstract> temp_data(temp);
306 // m_data=temp_data;
307 m_data=temp->getPtr();
308 } else {
309 DataAbstract* temp=new DataConstant(what, shape, value);
310 // boost::shared_ptr<DataAbstract> temp_data(temp);
311 // m_data=temp_data;
312 m_data=temp->getPtr();
313 }
314 }
315
316
317 // void
318 // Data::CompareDebug(const Data& rd)
319 // {
320 // using namespace std;
321 // bool mismatch=false;
322 // std::cout << "Comparing left and right" << endl;
323 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
324 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
325 //
326 // if (left==0)
327 // {
328 // cout << "left arg is not a DataTagged\n";
329 // return;
330 // }
331 //
332 // if (right==0)
333 // {
334 // cout << "right arg is not a DataTagged\n";
335 // return;
336 // }
337 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
338 // cout << "Shapes ";
339 // if (left->getShape()==right->getShape())
340 // {
341 // cout << "ok\n";
342 // }
343 // else
344 // {
345 // cout << "Problem: shapes do not match\n";
346 // mismatch=true;
347 // }
348 // int lim=left->getVector().size();
349 // if (right->getVector().size()) lim=right->getVector().size();
350 // for (int i=0;i<lim;++i)
351 // {
352 // if (left->getVector()[i]!=right->getVector()[i])
353 // {
354 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
355 // mismatch=true;
356 // }
357 // }
358 //
359 // // still need to check the tag map
360 // // also need to watch what is happening to function spaces, are they copied or what?
361 //
362 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
363 // const DataTagged::DataMapType& mapright=right->getTagLookup();
364 //
365 // if (mapleft.size()!=mapright.size())
366 // {
367 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
368 // mismatch=true;
369 // cout << "Left map\n";
370 // DataTagged::DataMapType::const_iterator i,j;
371 // for (i=mapleft.begin();i!=mapleft.end();++i) {
372 // cout << "(" << i->first << "=>" << i->second << ")\n";
373 // }
374 // cout << "Right map\n";
375 // for (i=mapright.begin();i!=mapright.end();++i) {
376 // cout << "(" << i->first << "=>" << i->second << ")\n";
377 // }
378 // cout << "End map\n";
379 //
380 // }
381 //
382 // DataTagged::DataMapType::const_iterator i,j;
383 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
384 // if ((i->first!=j->first) || (i->second!=j->second))
385 // {
386 // cout << "(" << i->first << "=>" << i->second << ")";
387 // cout << ":(" << j->first << "=>" << j->second << ") ";
388 // mismatch=true;
389 // }
390 // }
391 // if (mismatch)
392 // {
393 // cout << "#Mismatch\n";
394 // }
395 // }
396
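// Note: both getDataC() overloads below expose this object to the C layer by storing a
// void* pointer to it in an escriptDataC struct (the const overload casts constness away).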
397 escriptDataC
398 Data::getDataC()
399 {
400 escriptDataC temp;
401 temp.m_dataPtr=(void*)this;
402 return temp;
403 }
404
405 escriptDataC
406 Data::getDataC() const
407 {
408 escriptDataC temp;
409 temp.m_dataPtr=(void*)this;
410 return temp;
411 }
412
413 const boost::python::tuple
414 Data::getShapeTuple() const
415 {
416 const DataTypes::ShapeType& shape=getDataPointShape();
417 switch(getDataPointRank()) {
418 case 0:
419 return make_tuple();
420 case 1:
421 return make_tuple(long_(shape[0]));
422 case 2:
423 return make_tuple(long_(shape[0]),long_(shape[1]));
424 case 3:
425 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
426 case 4:
427 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
428 default:
429 throw DataException("Error - illegal Data rank.");
430 }
431 }
432
433
434 // The different name is needed because boost has trouble with overloaded functions.
435 // It can't work out what type the function is based solely on its name.
436 // There are ways to fix this involving creating function pointer variables for each form,
437 // but there doesn't seem to be a need given that the methods have the same name from the Python point of view.
438 Data*
439 Data::copySelf()
440 {
441 DataAbstract* temp=m_data->deepCopy();
442 return new Data(temp);
443 }
444
445 void
446 Data::copy(const Data& other)
447 {
448 DataAbstract* temp=other.m_data->deepCopy();
449 DataAbstract_ptr p=temp->getPtr();
450 m_data=p;
451 }
452
453
454 Data
455 Data::delay()
456 {
457 DataLazy* dl=new DataLazy(m_data);
458 return Data(dl);
459 }
460
461 void
462 Data::delaySelf()
463 {
464 if (!isLazy())
465 {
466 m_data=(new DataLazy(m_data))->getPtr();
467 }
468 }
469
470 void
471 Data::setToZero()
472 {
473 if (isEmpty())
474 {
475 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
476 }
477 m_data->setToZero();
478 }
479
480 void
481 Data::copyWithMask(const Data& other,
482 const Data& mask)
483 {
484 // 1. Interpolate if required so all Datas use the same FS as this
485 // 2. Tag or Expand so that all Data's are the same type
486 // 3. Iterate over the data vectors copying values where mask is >0
487 if (other.isEmpty() || mask.isEmpty())
488 {
489 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
490 }
491 Data other2(other);
492 Data mask2(mask);
493 other2.resolve();
494 mask2.resolve();
495 this->resolve();
496 FunctionSpace myFS=getFunctionSpace();
497 FunctionSpace oFS=other2.getFunctionSpace();
498 FunctionSpace mFS=mask2.getFunctionSpace();
499 if (oFS!=myFS)
500 {
501 if (other2.probeInterpolation(myFS))
502 {
503 other2=other2.interpolate(myFS);
504 }
505 else
506 {
507 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
508 }
509 }
510 if (mFS!=myFS)
511 {
512 if (mask2.probeInterpolation(myFS))
513 {
514 mask2=mask2.interpolate(myFS);
515 }
516 else
517 {
518 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
519 }
520 }
521 // Ensure that all args have the same type
522 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
523 {
524 this->expand();
525 other2.expand();
526 mask2.expand();
527 }
528 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
529 {
530 this->tag();
531 other2.tag();
532 mask2.tag();
533 }
534 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
535 {
536 }
537 else
538 {
539 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
540 }
541 // Now we iterate over the elements
542 DataVector& self=getReadyPtr()->getVector();
543 const DataVector& ovec=other2.getReadyPtr()->getVector();
544 const DataVector& mvec=mask2.getReadyPtr()->getVector();
545 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
546 {
547 throw DataException("Error - size mismatch in arguments to copyWithMask.");
548 }
549 size_t num_points=self.size();
550
551 // OPENMP 3.0 allows unsigned loop vars.
552 #if defined(_OPENMP) && (_OPENMP < 200805)
553 long i;
554 #else
555 size_t i;
556 #endif
557 #pragma omp parallel for private(i) schedule(static)
558 for (i=0;i<num_points;++i)
559 {
560 if (mvec[i]>0)
561 {
562 self[i]=ovec[i];
563 }
564 }
565 }
566
567
568
569 bool
570 Data::isExpanded() const
571 {
572 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
573 return (temp!=0);
574 }
575
576 bool
577 Data::isTagged() const
578 {
579 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
580 return (temp!=0);
581 }
582
583 bool
584 Data::isEmpty() const
585 {
586 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
587 return (temp!=0);
588 }
589
590 bool
591 Data::isConstant() const
592 {
593 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
594 return (temp!=0);
595 }
596
597 bool
598 Data::isLazy() const
599 {
600 return m_data->isLazy();
601 }
602
603 // at the moment this is synonymous with !isLazy() but that could change
604 bool
605 Data::isReady() const
606 {
607 return (dynamic_cast<DataReady*>(m_data.get())!=0);
608 }
609
610
611 void
612 Data::setProtection()
613 {
614 m_protected=true;
615 }
616
617 bool
618 Data::isProtected() const
619 {
620 return m_protected;
621 }
622
623
624
625 void
626 Data::expand()
627 {
628 if (isConstant()) {
629 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
630 DataAbstract* temp=new DataExpanded(*tempDataConst);
631 // shared_ptr<DataAbstract> temp_data(temp);
632 // m_data=temp_data;
633 m_data=temp->getPtr();
634 } else if (isTagged()) {
635 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
636 DataAbstract* temp=new DataExpanded(*tempDataTag);
637 // shared_ptr<DataAbstract> temp_data(temp);
638 // m_data=temp_data;
639 m_data=temp->getPtr();
640 } else if (isExpanded()) {
641 //
642 // do nothing
643 } else if (isEmpty()) {
644 throw DataException("Error - Expansion of DataEmpty not possible.");
645 } else if (isLazy()) {
646 resolve();
647 expand(); // resolve might not give us expanded data
648 } else {
649 throw DataException("Error - Expansion not implemented for this Data type.");
650 }
651 }
652
653 void
654 Data::tag()
655 {
656 if (isConstant()) {
657 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
658 DataAbstract* temp=new DataTagged(*tempDataConst);
659 // shared_ptr<DataAbstract> temp_data(temp);
660 // m_data=temp_data;
661 m_data=temp->getPtr();
662 } else if (isTagged()) {
663 // do nothing
664 } else if (isExpanded()) {
665 throw DataException("Error - Creating tag data from DataExpanded not possible.");
666 } else if (isEmpty()) {
667 throw DataException("Error - Creating tag data from DataEmpty not possible.");
668 } else if (isLazy()) {
669 DataAbstract_ptr res=m_data->resolve();
670 if (m_data->isExpanded())
671 {
672 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
673 }
674 m_data=res;
675 tag();
676 } else {
677 throw DataException("Error - Tagging not implemented for this Data type.");
678 }
679 }
680
681 void
682 Data::resolve()
683 {
684 if (isLazy())
685 {
686 m_data=m_data->resolve();
687 }
688 }
689
690
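// The unary operations that follow share one pattern: on lazy data they wrap the
// underlying pointer in a new DataLazy node tagged with the operation code (RECIP,
// GZ, SIN, ...) and return that; otherwise they apply the functor immediately via
// C_TensorUnaryOperation.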
691 Data
692 Data::oneOver() const
693 {
694 if (isLazy())
695 {
696 DataLazy* c=new DataLazy(borrowDataPtr(),RECIP);
697 return Data(c);
698 }
699 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
700 }
701
702 Data
703 Data::wherePositive() const
704 {
705 if (isLazy())
706 {
707 DataLazy* c=new DataLazy(borrowDataPtr(),GZ);
708 return Data(c);
709 }
710 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
711 }
712
713 Data
714 Data::whereNegative() const
715 {
716 if (isLazy())
717 {
718 DataLazy* c=new DataLazy(borrowDataPtr(),LZ);
719 return Data(c);
720 }
721 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
722 }
723
724 Data
725 Data::whereNonNegative() const
726 {
727 if (isLazy())
728 {
729 DataLazy* c=new DataLazy(borrowDataPtr(),GEZ);
730 return Data(c);
731 }
732 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
733 }
734
735 Data
736 Data::whereNonPositive() const
737 {
738 if (isLazy())
739 {
740 DataLazy* c=new DataLazy(borrowDataPtr(),LEZ);
741 return Data(c);
742 }
743 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
744 }
745
746 Data
747 Data::whereZero(double tol) const
748 {
749 Data dataAbs=abs();
750 return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
751 }
752
753 Data
754 Data::whereNonZero(double tol) const
755 {
756 Data dataAbs=abs();
757 return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
758 }
759
760 Data
761 Data::interpolate(const FunctionSpace& functionspace) const
762 {
763 return Data(*this,functionspace);
764 }
765
766 bool
767 Data::probeInterpolation(const FunctionSpace& functionspace) const
768 {
769 return getFunctionSpace().probeInterpolation(functionspace);
770 // if (getFunctionSpace()==functionspace) {
771 // return true;
772 // } else {
773 // const_Domain_ptr domain=getDomain();
774 // if (*domain==*functionspace.getDomain()) {
775 // return domain->probeInterpolationOnDomain(getFunctionSpace().getTypeCode(),functionspace.getTypeCode());
776 // } else {
777 // return domain->probeInterpolationACross(getFunctionSpace().getTypeCode(),*(functionspace.getDomain()),functionspace.getTypeCode());
778 // }
779 // }
780 }
781
782 Data
783 Data::gradOn(const FunctionSpace& functionspace) const
784 {
785 if (isEmpty())
786 {
787 throw DataException("Error - operation not permitted on instances of DataEmpty.");
788 }
789 double blocktimer_start = blocktimer_time();
790 if (functionspace.getDomain()!=getDomain())
791 throw DataException("Error - gradient cannot be calculated on different domains.");
792 DataTypes::ShapeType grad_shape=getDataPointShape();
793 grad_shape.push_back(functionspace.getDim());
794 Data out(0.0,grad_shape,functionspace,true);
795 getDomain()->setToGradient(out,*this);
796 blocktimer_increment("grad()", blocktimer_start);
797 return out;
798 }
799
800 Data
801 Data::grad() const
802 {
803 if (isEmpty())
804 {
805 throw DataException("Error - operation not permitted on instances of DataEmpty.");
806 }
807 return gradOn(escript::function(*getDomain()));
808 }
809
810 int
811 Data::getDataPointSize() const
812 {
813 return m_data->getNoValues();
814 }
815
816 DataTypes::ValueType::size_type
817 Data::getLength() const
818 {
819 return m_data->getLength();
820 }
821
822 const
823 boost::python::numeric::array
824 Data::getValueOfDataPoint(int dataPointNo)
825 {
826 int i, j, k, l;
827
828 FORCERESOLVE;
829
830 //
831 // determine the rank and shape of each data point
832 int dataPointRank = getDataPointRank();
833 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
834
835 //
836 // create the numeric array to be returned
837 boost::python::numeric::array numArray(0.0);
838
839 //
840 // the shape of the returned numeric array will be the same
841 // as that of the data point
842 int arrayRank = dataPointRank;
843 const DataTypes::ShapeType& arrayShape = dataPointShape;
844
845 //
846 // resize the numeric array to the shape just calculated
847 if (arrayRank==0) {
848 numArray.resize(1);
849 }
850 if (arrayRank==1) {
851 numArray.resize(arrayShape[0]);
852 }
853 if (arrayRank==2) {
854 numArray.resize(arrayShape[0],arrayShape[1]);
855 }
856 if (arrayRank==3) {
857 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
858 }
859 if (arrayRank==4) {
860 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
861 }
862
863 if (getNumDataPointsPerSample()>0) {
864 int sampleNo = dataPointNo/getNumDataPointsPerSample();
865 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
866 //
867 // Check a valid sample number has been supplied
868 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
869 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
870 }
871
872 //
873 // Check a valid data point number has been supplied
874 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
875 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
876 }
877 // TODO: global error handling
878 // create a view of the data if it is stored locally
879 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
880 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
881
882
883 switch( dataPointRank ){
884 case 0 :
885 numArray[0] = getDataAtOffset(offset);
886 break;
887 case 1 :
888 for( i=0; i<dataPointShape[0]; i++ )
889 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
890 break;
891 case 2 :
892 for( i=0; i<dataPointShape[0]; i++ )
893 for( j=0; j<dataPointShape[1]; j++)
894 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
895 break;
896 case 3 :
897 for( i=0; i<dataPointShape[0]; i++ )
898 for( j=0; j<dataPointShape[1]; j++ )
899 for( k=0; k<dataPointShape[2]; k++)
900 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
901 break;
902 case 4 :
903 for( i=0; i<dataPointShape[0]; i++ )
904 for( j=0; j<dataPointShape[1]; j++ )
905 for( k=0; k<dataPointShape[2]; k++ )
906 for( l=0; l<dataPointShape[3]; l++)
907 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
908 break;
909 }
910 }
911 //
912 // return the array
913 return numArray;
914
915 }
916
917 void
918 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
919 {
920 // this will throw if the value cannot be represented
921 boost::python::numeric::array num_array(py_object);
922 setValueOfDataPointToArray(dataPointNo,num_array);
923 }
924
925 void
926 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
927 {
928 if (isProtected()) {
929 throw DataException("Error - attempt to update protected Data object.");
930 }
931 FORCERESOLVE;
932 //
933 // check rank
934 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
935 throw DataException("Rank of numarray does not match Data object rank");
936
937 //
938 // check shape of num_array
939 for (unsigned int i=0; i<getDataPointRank(); i++) {
940 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
941 throw DataException("Shape of numarray does not match Data object shape");
942 }
943 //
944 // make sure data is expanded:
945 //
946 if (!isExpanded()) {
947 expand();
948 }
949 if (getNumDataPointsPerSample()>0) {
950 int sampleNo = dataPointNo/getNumDataPointsPerSample();
951 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
952 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
953 } else {
954 m_data->copyToDataPoint(-1, 0,num_array);
955 }
956 }
957
958 void
959 Data::setValueOfDataPoint(int dataPointNo, const double value)
960 {
961 if (isProtected()) {
962 throw DataException("Error - attempt to update protected Data object.");
963 }
964 //
965 // make sure data is expanded:
966 FORCERESOLVE;
967 if (!isExpanded()) {
968 expand();
969 }
970 if (getNumDataPointsPerSample()>0) {
971 int sampleNo = dataPointNo/getNumDataPointsPerSample();
972 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
973 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
974 } else {
975 m_data->copyToDataPoint(-1, 0,value);
976 }
977 }
978
979 const
980 boost::python::numeric::array
981 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
982 {
983 size_t length=0;
984 int i, j, k, l, pos;
985 FORCERESOLVE;
986 //
987 // determine the rank and shape of each data point
988 int dataPointRank = getDataPointRank();
989 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
990
991 //
992 // create the numeric array to be returned
993 boost::python::numeric::array numArray(0.0);
994
995 //
996 // the shape of the returned numeric array will be the same
997 // as that of the data point
998 int arrayRank = dataPointRank;
999 const DataTypes::ShapeType& arrayShape = dataPointShape;
1000
1001 //
1002 // resize the numeric array to the shape just calculated
1003 if (arrayRank==0) {
1004 numArray.resize(1);
1005 }
1006 if (arrayRank==1) {
1007 numArray.resize(arrayShape[0]);
1008 }
1009 if (arrayRank==2) {
1010 numArray.resize(arrayShape[0],arrayShape[1]);
1011 }
1012 if (arrayRank==3) {
1013 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
1014 }
1015 if (arrayRank==4) {
1016 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
1017 }
1018
1019 // added for the MPI communication
1020 length=1;
1021 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1022 double *tmpData = new double[length];
1023
1024 //
1025 // load the values for the data point into the numeric array.
1026
1027 // updated for the MPI case
1028 if( get_MPIRank()==procNo ){
1029 if (getNumDataPointsPerSample()>0) {
1030 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1031 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1032 //
1033 // Check a valid sample number has been supplied
1034 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1035 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1036 }
1037
1038 //
1039 // Check a valid data point number has been supplied
1040 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1041 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1042 }
1043 // TODO: global error handling
1044 // create a view of the data if it is stored locally
1045 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1046 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1047
1048 // pack the data from the view into tmpData for MPI communication
1049 pos=0;
1050 switch( dataPointRank ){
1051 case 0 :
1052 tmpData[0] = getDataAtOffset(offset);
1053 break;
1054 case 1 :
1055 for( i=0; i<dataPointShape[0]; i++ )
1056 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1057 break;
1058 case 2 :
1059 for( i=0; i<dataPointShape[0]; i++ )
1060 for( j=0; j<dataPointShape[1]; j++, pos++ )
1061 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1062 break;
1063 case 3 :
1064 for( i=0; i<dataPointShape[0]; i++ )
1065 for( j=0; j<dataPointShape[1]; j++ )
1066 for( k=0; k<dataPointShape[2]; k++, pos++ )
1067 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1068 break;
1069 case 4 :
1070 for( i=0; i<dataPointShape[0]; i++ )
1071 for( j=0; j<dataPointShape[1]; j++ )
1072 for( k=0; k<dataPointShape[2]; k++ )
1073 for( l=0; l<dataPointShape[3]; l++, pos++ )
1074 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1075 break;
1076 }
1077 }
1078 }
1079 #ifdef PASO_MPI
1080 // broadcast the data to all other processes
1081 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1082 #endif
1083
1084 // unpack the data
1085 switch( dataPointRank ){
1086 case 0 :
1087 numArray[0]=tmpData[0];
1088 break;
1089 case 1 :
1090 for( i=0; i<dataPointShape[0]; i++ )
1091 numArray[i]=tmpData[i];
1092 break;
1093 case 2 :
1094 for( i=0; i<dataPointShape[0]; i++ )
1095 for( j=0; j<dataPointShape[1]; j++ )
1096 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1097 break;
1098 case 3 :
1099 for( i=0; i<dataPointShape[0]; i++ )
1100 for( j=0; j<dataPointShape[1]; j++ )
1101 for( k=0; k<dataPointShape[2]; k++ )
1102 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1103 break;
1104 case 4 :
1105 for( i=0; i<dataPointShape[0]; i++ )
1106 for( j=0; j<dataPointShape[1]; j++ )
1107 for( k=0; k<dataPointShape[2]; k++ )
1108 for( l=0; l<dataPointShape[3]; l++ )
1109 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1110 break;
1111 }
1112
1113 delete [] tmpData;
1114 //
1115 // return the loaded array
1116 return numArray;
1117 }
1118
1119
1120 boost::python::numeric::array
1121 Data::integrate_const() const
1122 {
1123 if (isLazy())
1124 {
1125 throw DataException("Error - cannot integrate for constant lazy data.");
1126 }
1127 return integrateWorker();
1128 }
1129
1130 boost::python::numeric::array
1131 Data::integrate()
1132 {
1133 if (isLazy())
1134 {
1135 expand();
1136 }
1137 return integrateWorker();
1138 }
1139
1140
1141
1142 boost::python::numeric::array
1143 Data::integrateWorker() const
1144 {
1145 int index;
1146 int rank = getDataPointRank();
1147 DataTypes::ShapeType shape = getDataPointShape();
1148 int dataPointSize = getDataPointSize();
1149
1150 //
1151 // calculate the integral values
1152 vector<double> integrals(dataPointSize);
1153 vector<double> integrals_local(dataPointSize);
1154 #ifdef PASO_MPI
1155 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals_local,*this);
1156 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1157 double *tmp = new double[dataPointSize];
1158 double *tmp_local = new double[dataPointSize];
1159 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1160 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1161 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1162 delete[] tmp;
1163 delete[] tmp_local;
1164 #else
1165 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals,*this);
1166 #endif
1167
1168 //
1169 // create the numeric array to be returned
1170 // and load the array with the integral values
1171 boost::python::numeric::array bp_array(1.0);
1172 if (rank==0) {
1173 bp_array.resize(1);
1174 index = 0;
1175 bp_array[0] = integrals[index];
1176 }
1177 if (rank==1) {
1178 bp_array.resize(shape[0]);
1179 for (int i=0; i<shape[0]; i++) {
1180 index = i;
1181 bp_array[i] = integrals[index];
1182 }
1183 }
1184 if (rank==2) {
1185 bp_array.resize(shape[0],shape[1]);
1186 for (int i=0; i<shape[0]; i++) {
1187 for (int j=0; j<shape[1]; j++) {
1188 index = i + shape[0] * j;
1189 bp_array[make_tuple(i,j)] = integrals[index];
1190 }
1191 }
1192 }
1193 if (rank==3) {
1194 bp_array.resize(shape[0],shape[1],shape[2]);
1195 for (int i=0; i<shape[0]; i++) {
1196 for (int j=0; j<shape[1]; j++) {
1197 for (int k=0; k<shape[2]; k++) {
1198 index = i + shape[0] * ( j + shape[1] * k );
1199 bp_array[make_tuple(i,j,k)] = integrals[index];
1200 }
1201 }
1202 }
1203 }
1204 if (rank==4) {
1205 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1206 for (int i=0; i<shape[0]; i++) {
1207 for (int j=0; j<shape[1]; j++) {
1208 for (int k=0; k<shape[2]; k++) {
1209 for (int l=0; l<shape[3]; l++) {
1210 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1211 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1212 }
1213 }
1214 }
1215 }
1216 }
1217
1218 //
1219 // return the loaded array
1220 return bp_array;
1221 }
1222
1223 Data
1224 Data::sin() const
1225 {
1226 if (isLazy())
1227 {
1228 DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
1229 return Data(c);
1230 }
1231 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1232 }
1233
1234 Data
1235 Data::cos() const
1236 {
1237 if (isLazy())
1238 {
1239 DataLazy* c=new DataLazy(borrowDataPtr(),COS);
1240 return Data(c);
1241 }
1242 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1243 }
1244
1245 Data
1246 Data::tan() const
1247 {
1248 if (isLazy())
1249 {
1250 DataLazy* c=new DataLazy(borrowDataPtr(),TAN);
1251 return Data(c);
1252 }
1253 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1254 }
1255
1256 Data
1257 Data::asin() const
1258 {
1259 if (isLazy())
1260 {
1261 DataLazy* c=new DataLazy(borrowDataPtr(),ASIN);
1262 return Data(c);
1263 }
1264 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1265 }
1266
1267 Data
1268 Data::acos() const
1269 {
1270 if (isLazy())
1271 {
1272 DataLazy* c=new DataLazy(borrowDataPtr(),ACOS);
1273 return Data(c);
1274 }
1275 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1276 }
1277
1278
1279 Data
1280 Data::atan() const
1281 {
1282 if (isLazy())
1283 {
1284 DataLazy* c=new DataLazy(borrowDataPtr(),ATAN);
1285 return Data(c);
1286 }
1287 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1288 }
1289
1290 Data
1291 Data::sinh() const
1292 {
1293 if (isLazy())
1294 {
1295 DataLazy* c=new DataLazy(borrowDataPtr(),SINH);
1296 return Data(c);
1297 }
1298 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1299 }
1300
1301 Data
1302 Data::cosh() const
1303 {
1304 if (isLazy())
1305 {
1306 DataLazy* c=new DataLazy(borrowDataPtr(),COSH);
1307 return Data(c);
1308 }
1309 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1310 }
1311
1312 Data
1313 Data::tanh() const
1314 {
1315 if (isLazy())
1316 {
1317 DataLazy* c=new DataLazy(borrowDataPtr(),TANH);
1318 return Data(c);
1319 }
1320 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1321 }
1322
1323
1324 Data
1325 Data::erf() const
1326 {
1327 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1328 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1329 #else
1330 if (isLazy())
1331 {
1332 DataLazy* c=new DataLazy(borrowDataPtr(),ERF);
1333 return Data(c);
1334 }
1335 return C_TensorUnaryOperation(*this, ::erf);
1336 #endif
1337 }
1338
1339 Data
1340 Data::asinh() const
1341 {
1342 if (isLazy())
1343 {
1344 DataLazy* c=new DataLazy(borrowDataPtr(),ASINH);
1345 return Data(c);
1346 }
1347 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1348 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1349 #else
1350 return C_TensorUnaryOperation(*this, ::asinh);
1351 #endif
1352 }
1353
1354 Data
1355 Data::acosh() const
1356 {
1357 if (isLazy())
1358 {
1359 DataLazy* c=new DataLazy(borrowDataPtr(),ACOSH);
1360 return Data(c);
1361 }
1362 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1363 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1364 #else
1365 return C_TensorUnaryOperation(*this, ::acosh);
1366 #endif
1367 }
1368
1369 Data
1370 Data::atanh() const
1371 {
1372 if (isLazy())
1373 {
1374 DataLazy* c=new DataLazy(borrowDataPtr(),ATANH);
1375 return Data(c);
1376 }
1377 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1378 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1379 #else
1380 return C_TensorUnaryOperation(*this, ::atanh);
1381 #endif
1382 }
1383
1384 Data
1385 Data::log10() const
1386 { if (isLazy())
1387 {
1388 DataLazy* c=new DataLazy(borrowDataPtr(),LOG10);
1389 return Data(c);
1390 }
1391 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1392 }
1393
1394 Data
1395 Data::log() const
1396 {
1397 if (isLazy())
1398 {
1399 DataLazy* c=new DataLazy(borrowDataPtr(),LOG);
1400 return Data(c);
1401 }
1402 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1403 }
1404
1405 Data
1406 Data::sign() const
1407 {
1408 if (isLazy())
1409 {
1410 DataLazy* c=new DataLazy(borrowDataPtr(),SIGN);
1411 return Data(c);
1412 }
1413 return C_TensorUnaryOperation(*this, escript::fsign);
1414 }
1415
1416 Data
1417 Data::abs() const
1418 {
1419 if (isLazy())
1420 {
1421 DataLazy* c=new DataLazy(borrowDataPtr(),ABS);
1422 return Data(c);
1423 }
1424 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1425 }
1426
1427 Data
1428 Data::neg() const
1429 {
1430 if (isLazy())
1431 {
1432 DataLazy* c=new DataLazy(borrowDataPtr(),NEG);
1433 return Data(c);
1434 }
1435 return C_TensorUnaryOperation(*this, negate<double>());
1436 }
1437
1438 Data
1439 Data::pos() const
1440 {
1441 // Not doing a lazy check here is deliberate:
1442 // since a deep copy of lazy data should be cheap, I'll just let it happen now.
1443 Data result;
1444 // perform a deep copy
1445 result.copy(*this);
1446 return result;
1447 }
1448
1449 Data
1450 Data::exp() const
1451 {
1452 if (isLazy())
1453 {
1454 DataLazy* c=new DataLazy(borrowDataPtr(),EXP);
1455 return Data(c);
1456 }
1457 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1458 }
1459
1460 Data
1461 Data::sqrt() const
1462 {
1463 if (isLazy())
1464 {
1465 DataLazy* c=new DataLazy(borrowDataPtr(),SQRT);
1466 return Data(c);
1467 }
1468 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1469 }
1470
1471 double
1472 Data::Lsup_const() const
1473 {
1474 if (isLazy())
1475 {
1476 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1477 }
1478 return LsupWorker();
1479 }
1480
1481 double
1482 Data::Lsup()
1483 {
1484 if (isLazy())
1485 {
1486 expand();
1487 }
1488 return LsupWorker();
1489 }
1490
1491 double
1492 Data::sup_const() const
1493 {
1494 if (isLazy())
1495 {
1496 throw DataException("Error - cannot compute sup for constant lazy data.");
1497 }
1498 return supWorker();
1499 }
1500
1501 double
1502 Data::sup()
1503 {
1504 if (isLazy())
1505 {
1506 expand();
1507 }
1508 return supWorker();
1509 }
1510
1511 double
1512 Data::inf_const() const
1513 {
1514 if (isLazy())
1515 {
1516 throw DataException("Error - cannot compute inf for constant lazy data.");
1517 }
1518 return infWorker();
1519 }
1520
1521 double
1522 Data::inf()
1523 {
1524 if (isLazy())
1525 {
1526 expand();
1527 }
1528 return infWorker();
1529 }
1530
1531 double
1532 Data::LsupWorker() const
1533 {
1534 double localValue;
1535 //
1536 // set the initial absolute maximum value to zero
1537
1538 AbsMax abs_max_func;
1539 localValue = algorithm(abs_max_func,0);
1540 #ifdef PASO_MPI
1541 double globalValue;
1542 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1543 return globalValue;
1544 #else
1545 return localValue;
1546 #endif
1547 }
1548
1549 double
1550 Data::supWorker() const
1551 {
1552 double localValue;
1553 //
1554 // set the initial maximum value to min possible double
1555 FMax fmax_func;
1556 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1557 #ifdef PASO_MPI
1558 double globalValue;
1559 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1560 return globalValue;
1561 #else
1562 return localValue;
1563 #endif
1564 }
1565
1566 double
1567 Data::infWorker() const
1568 {
1569 double localValue;
1570 //
1571 // set the initial minimum value to max possible double
1572 FMin fmin_func;
1573 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1574 #ifdef PASO_MPI
1575 double globalValue;
1576 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1577 return globalValue;
1578 #else
1579 return localValue;
1580 #endif
1581 }
1582
1583 /* TODO */
1584 /* global reduction */
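// (Presumably the TODO refers to an MPI reduction across ranks, as done in the
// Lsup/sup/inf workers above; maxval() and minval() below only reduce over the
// components of each data point via dp_algorithm.)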
1585 Data
1586 Data::maxval() const
1587 {
1588 if (isLazy())
1589 {
1590 Data temp(*this); // to get around the fact that you can't resolve a const Data
1591 temp.resolve();
1592 return temp.maxval();
1593 }
1594 //
1595 // set the initial maximum value to min possible double
1596 FMax fmax_func;
1597 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1598 }
1599
1600 Data
1601 Data::minval() const
1602 {
1603 if (isLazy())
1604 {
1605 Data temp(*this); // to get around the fact that you can't resolve a const Data
1606 temp.resolve();
1607 return temp.minval();
1608 }
1609 //
1610 // set the initial minimum value to max possible double
1611 FMin fmin_func;
1612 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1613 }
1614
1615 Data
1616 Data::swapaxes(const int axis0, const int axis1) const
1617 {
1618 int axis0_tmp,axis1_tmp;
1619 DataTypes::ShapeType s=getDataPointShape();
1620 DataTypes::ShapeType ev_shape;
1621 // Build the output shape: it is the input shape with the extents at
1622 // axis0 and axis1 exchanged.
1623 int rank=getDataPointRank();
1624 if (rank<2) {
1625 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1626 }
1627 if (axis0<0 || axis0>rank-1) {
1628 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1629 }
1630 if (axis1<0 || axis1>rank-1) {
1631 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1632 }
1633 if (axis0 == axis1) {
1634 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1635 }
1636 if (axis0 > axis1) {
1637 axis0_tmp=axis1;
1638 axis1_tmp=axis0;
1639 } else {
1640 axis0_tmp=axis0;
1641 axis1_tmp=axis1;
1642 }
1643 for (int i=0; i<rank; i++) {
1644 if (i == axis0_tmp) {
1645 ev_shape.push_back(s[axis1_tmp]);
1646 } else if (i == axis1_tmp) {
1647 ev_shape.push_back(s[axis0_tmp]);
1648 } else {
1649 ev_shape.push_back(s[i]);
1650 }
1651 }
1652 Data ev(0.,ev_shape,getFunctionSpace());
1653 ev.typeMatchRight(*this);
1654 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1655 return ev;
1656
1657 }
1658
1659 Data
1660 Data::symmetric() const
1661 {
1662 // check input
1663 DataTypes::ShapeType s=getDataPointShape();
1664 if (getDataPointRank()==2) {
1665 if(s[0] != s[1])
1666 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1667 }
1668 else if (getDataPointRank()==4) {
1669 if(!(s[0] == s[2] && s[1] == s[3]))
1670 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1671 }
1672 else {
1673 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1674 }
1675 if (isLazy())
1676 {
1677 DataLazy* c=new DataLazy(borrowDataPtr(),SYM);
1678 return Data(c);
1679 }
1680 Data ev(0.,getDataPointShape(),getFunctionSpace());
1681 ev.typeMatchRight(*this);
1682 m_data->symmetric(ev.m_data.get());
1683 return ev;
1684 }
1685
1686 Data
1687 Data::nonsymmetric() const
1688 {
1689 if (isLazy())
1690 {
1691 DataLazy* c=new DataLazy(borrowDataPtr(),NSYM);
1692 return Data(c);
1693 }
1694 // check input
1695 DataTypes::ShapeType s=getDataPointShape();
1696 if (getDataPointRank()==2) {
1697 if(s[0] != s[1])
1698 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1699 DataTypes::ShapeType ev_shape;
1700 ev_shape.push_back(s[0]);
1701 ev_shape.push_back(s[1]);
1702 Data ev(0.,ev_shape,getFunctionSpace());
1703 ev.typeMatchRight(*this);
1704 m_data->nonsymmetric(ev.m_data.get());
1705 return ev;
1706 }
1707 else if (getDataPointRank()==4) {
1708 if(!(s[0] == s[2] && s[1] == s[3]))
1709 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1710 DataTypes::ShapeType ev_shape;
1711 ev_shape.push_back(s[0]);
1712 ev_shape.push_back(s[1]);
1713 ev_shape.push_back(s[2]);
1714 ev_shape.push_back(s[3]);
1715 Data ev(0.,ev_shape,getFunctionSpace());
1716 ev.typeMatchRight(*this);
1717 m_data->nonsymmetric(ev.m_data.get());
1718 return ev;
1719 }
1720 else {
1721 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1722 }
1723 }
1724
1725
1726 // Doing a lazy version of this would require some thought.
1727 // First, it needs a parameter (which DataLazy doesn't support at the moment).
1728 // Secondly (although this does not apply to trace), we can't handle operations which return
1729 // multiple results (like eigenvalues_and_eigenvectors) or return values of different shapes to their input
1730 // (like eigenvalues).
1731 Data
1732 Data::trace(int axis_offset) const
1733 {
1734 if (isLazy())
1735 {
1736 Data temp(*this); // to get around the fact that you can't resolve a const Data
1737 temp.resolve();
1738 return temp.trace(axis_offset);
1739 }
1740 DataTypes::ShapeType s=getDataPointShape();
1741 if (getDataPointRank()==2) {
1742 DataTypes::ShapeType ev_shape;
1743 Data ev(0.,ev_shape,getFunctionSpace());
1744 ev.typeMatchRight(*this);
1745 m_data->trace(ev.m_data.get(), axis_offset);
1746 return ev;
1747 }
1748 if (getDataPointRank()==3) {
1749 DataTypes::ShapeType ev_shape;
1750 if (axis_offset==0) {
1751 int s2=s[2];
1752 ev_shape.push_back(s2);
1753 }
1754 else if (axis_offset==1) {
1755 int s0=s[0];
1756 ev_shape.push_back(s0);
1757 }
1758 Data ev(0.,ev_shape,getFunctionSpace());
1759 ev.typeMatchRight(*this);
1760 m_data->trace(ev.m_data.get(), axis_offset);
1761 return ev;
1762 }
1763 if (getDataPointRank()==4) {
1764 DataTypes::ShapeType ev_shape;
1765 if (axis_offset==0) {
1766 ev_shape.push_back(s[2]);
1767 ev_shape.push_back(s[3]);
1768 }
1769 else if (axis_offset==1) {
1770 ev_shape.push_back(s[0]);
1771 ev_shape.push_back(s[3]);
1772 }
1773 else if (axis_offset==2) {
1774 ev_shape.push_back(s[0]);
1775 ev_shape.push_back(s[1]);
1776 }
1777 Data ev(0.,ev_shape,getFunctionSpace());
1778 ev.typeMatchRight(*this);
1779 m_data->trace(ev.m_data.get(), axis_offset);
1780 return ev;
1781 }
1782 else {
1783 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1784 }
1785 }
1786
1787 Data
1788 Data::transpose(int axis_offset) const
1789 {
1790 if (isLazy())
1791 {
1792 Data temp(*this); // to get around the fact that you can't resolve a const Data
1793 temp.resolve();
1794 return temp.transpose(axis_offset);
1795 }
1796 DataTypes::ShapeType s=getDataPointShape();
1797 DataTypes::ShapeType ev_shape;
1798 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1799 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1800 int rank=getDataPointRank();
1801 if (axis_offset<0 || axis_offset>rank) {
1802 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1803 }
1804 for (int i=0; i<rank; i++) {
1805 int index = (axis_offset+i)%rank;
1806 ev_shape.push_back(s[index]); // Append to new shape
1807 }
1808 Data ev(0.,ev_shape,getFunctionSpace());
1809 ev.typeMatchRight(*this);
1810 m_data->transpose(ev.m_data.get(), axis_offset);
1811 return ev;
1812 }
1813
1814 Data
1815 Data::eigenvalues() const
1816 {
1817 if (isLazy())
1818 {
1819 Data temp(*this); // to get around the fact that you can't resolve a const Data
1820 temp.resolve();
1821 return temp.eigenvalues();
1822 }
1823 // check input
1824 DataTypes::ShapeType s=getDataPointShape();
1825 if (getDataPointRank()!=2)
1826 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1827 if(s[0] != s[1])
1828 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1829 // create return
1830 DataTypes::ShapeType ev_shape(1,s[0]);
1831 Data ev(0.,ev_shape,getFunctionSpace());
1832 ev.typeMatchRight(*this);
1833 m_data->eigenvalues(ev.m_data.get());
1834 return ev;
1835 }
1836
1837 const boost::python::tuple
1838 Data::eigenvalues_and_eigenvectors(const double tol) const
1839 {
1840 if (isLazy())
1841 {
1842 Data temp(*this); // to get around the fact that you can't resolve a const Data
1843 temp.resolve();
1844 return temp.eigenvalues_and_eigenvectors(tol);
1845 }
1846 DataTypes::ShapeType s=getDataPointShape();
1847 if (getDataPointRank()!=2)
1848 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1849 if(s[0] != s[1])
1850 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1851 // create return
1852 DataTypes::ShapeType ev_shape(1,s[0]);
1853 Data ev(0.,ev_shape,getFunctionSpace());
1854 ev.typeMatchRight(*this);
1855 DataTypes::ShapeType V_shape(2,s[0]);
1856 Data V(0.,V_shape,getFunctionSpace());
1857 V.typeMatchRight(*this);
1858 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1859 return make_tuple(boost::python::object(ev),boost::python::object(V));
1860 }
1861
1862 const boost::python::tuple
1863 Data::minGlobalDataPoint() const
1864 {
1865 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() as boost::make_tuple causes an
1866 // abort (for unknown reasons) if there are OpenMP directives with it in the
1867 // surrounding function.
1868
1869 int DataPointNo;
1870 int ProcNo;
1871 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1872 return make_tuple(ProcNo,DataPointNo);
1873 }
1874
1875 void
1876 Data::calc_minGlobalDataPoint(int& ProcNo,
1877 int& DataPointNo) const
1878 {
1879 if (isLazy())
1880 {
1881 Data temp(*this); // to get around the fact that you can't resolve a const Data
1882 temp.resolve();
1883 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1884 }
1885 int i,j;
1886 int lowi=0,lowj=0;
1887 double min=numeric_limits<double>::max();
1888
1889 Data temp=minval();
1890
1891 int numSamples=temp.getNumSamples();
1892 int numDPPSample=temp.getNumDataPointsPerSample();
1893
1894 double next,local_min;
1895 int local_lowi=0,local_lowj=0;
1896
1897 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1898 {
1899 local_min=min;
1900 #pragma omp for private(i,j) schedule(static)
1901 for (i=0; i<numSamples; i++) {
1902 for (j=0; j<numDPPSample; j++) {
1903 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1904 if (next<local_min) {
1905 local_min=next;
1906 local_lowi=i;
1907 local_lowj=j;
1908 }
1909 }
1910 }
1911 #pragma omp critical
1912 if (local_min<min) {
1913 min=local_min;
1914 lowi=local_lowi;
1915 lowj=local_lowj;
1916 }
1917 }
1918
1919 #ifdef PASO_MPI
1920 // determine the processor on which the minimum occurs
1921 next = temp.getDataPoint(lowi,lowj);
1922 int lowProc = 0;
1923 double *globalMins = new double[get_MPISize()+1];
1924 int error;
1925 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1926
1927 if( get_MPIRank()==0 ){
1928 next = globalMins[lowProc];
1929 for( i=1; i<get_MPISize(); i++ )
1930 if( next>globalMins[i] ){
1931 lowProc = i;
1932 next = globalMins[i];
1933 }
1934 }
1935 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1936
1937 delete [] globalMins;
1938 ProcNo = lowProc;
1939 #else
1940 ProcNo = 0;
1941 #endif
1942 DataPointNo = lowj + lowi * numDPPSample;
1943 }
1944
1945 void
1946 Data::saveDX(std::string fileName) const
1947 {
1948 if (isEmpty())
1949 {
1950 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1951 }
1952 if (isLazy())
1953 {
1954 Data temp(*this); // to get around the fact that you can't resolve a const Data
1955 temp.resolve();
1956 temp.saveDX(fileName);
1957 return;
1958 }
1959 boost::python::dict args;
1960 args["data"]=boost::python::object(this);
1961 getDomain()->saveDX(fileName,args);
1962 return;
1963 }
1964
1965 void
1966 Data::saveVTK(std::string fileName) const
1967 {
1968 if (isEmpty())
1969 {
1970 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1971 }
1972 if (isLazy())
1973 {
1974 Data temp(*this); // to get around the fact that you can't resolve a const Data
1975 temp.resolve();
1976 temp.saveVTK(fileName);
1977 return;
1978 }
1979 boost::python::dict args;
1980 args["data"]=boost::python::object(this);
1981 getDomain()->saveVTK(fileName,args);
1982 return;
1983 }
1984
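// In-place arithmetic operators: if either operand is lazy, the operation is recorded
// as a DataLazy node instead of being evaluated (for lazy data, + is equivalent to +=,
// as noted below); otherwise binaryOp applies the corresponding std functor elementwise.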
1985 Data&
1986 Data::operator+=(const Data& right)
1987 {
1988 if (isProtected()) {
1989 throw DataException("Error - attempt to update protected Data object.");
1990 }
1991 if (isLazy() || right.isLazy())
1992 {
1993 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
1994 m_data=c->getPtr();
1995 return (*this);
1996 }
1997 else
1998 {
1999 binaryOp(right,plus<double>());
2000 return (*this);
2001 }
2002 }
2003
2004 Data&
2005 Data::operator+=(const boost::python::object& right)
2006 {
2007 if (isProtected()) {
2008 throw DataException("Error - attempt to update protected Data object.");
2009 }
2010 Data tmp(right,getFunctionSpace(),false);
2011 if (isLazy())
2012 {
2013 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
2014 m_data=c->getPtr();
2015 return (*this);
2016 }
2017 else
2018 {
2019 binaryOp(tmp,plus<double>());
2020 return (*this);
2021 }
2022 }
2023
2024 // Hmmm, operator= makes a deep copy but the copy constructor does not?
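// (The copy constructor above shares m_data between the two objects, whereas operator=
// goes through copy(), which calls deepCopy() on the underlying DataAbstract.)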
2025 Data&
2026 Data::operator=(const Data& other)
2027 {
2028 copy(other);
2029 return (*this);
2030 }
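// Illustrative sketch of the asymmetry noted above ('shape' and 'fs' are
// placeholder names, and copy() is assumed to deep-copy as its name suggests):
//
//   Data a(1.0, shape, fs, false);
//   Data b(a);   // copy constructor: b shares a's underlying data object
//   Data c;
//   c = a;       // operator=: c receives its own copy via copy(a)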
2031
2032 Data&
2033 Data::operator-=(const Data& right)
2034 {
2035 if (isProtected()) {
2036 throw DataException("Error - attempt to update protected Data object.");
2037 }
2038 if (isLazy() || right.isLazy())
2039 {
2040 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2041 m_data=c->getPtr();
2042 return (*this);
2043 }
2044 else
2045 {
2046 binaryOp(right,minus<double>());
2047 return (*this);
2048 }
2049 }
2050
2051 Data&
2052 Data::operator-=(const boost::python::object& right)
2053 {
2054 if (isProtected()) {
2055 throw DataException("Error - attempt to update protected Data object.");
2056 }
2057 Data tmp(right,getFunctionSpace(),false);
2058 if (isLazy())
2059 {
2060 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2061 m_data=c->getPtr();
2062 return (*this);
2063 }
2064 else
2065 {
2066 binaryOp(tmp,minus<double>());
2067 return (*this);
2068 }
2069 }
2070
2071 Data&
2072 Data::operator*=(const Data& right)
2073 {
2074 if (isProtected()) {
2075 throw DataException("Error - attempt to update protected Data object.");
2076 }
2077 if (isLazy() || right.isLazy())
2078 {
2079 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2080 m_data=c->getPtr();
2081 return (*this);
2082 }
2083 else
2084 {
2085 binaryOp(right,multiplies<double>());
2086 return (*this);
2087 }
2088 }
2089
2090 Data&
2091 Data::operator*=(const boost::python::object& right)
2092 {
2093 if (isProtected()) {
2094 throw DataException("Error - attempt to update protected Data object.");
2095 }
2096 Data tmp(right,getFunctionSpace(),false);
2097 if (isLazy())
2098 {
2099 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2100 m_data=c->getPtr();
2101 return (*this);
2102 }
2103 else
2104 {
2105 binaryOp(tmp,multiplies<double>());
2106 return (*this);
2107 }
2108 }
2109
2110 Data&
2111 Data::operator/=(const Data& right)
2112 {
2113 if (isProtected()) {
2114 throw DataException("Error - attempt to update protected Data object.");
2115 }
2116 if (isLazy() || right.isLazy())
2117 {
2118 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2119 m_data=c->getPtr();
2120 return (*this);
2121 }
2122 else
2123 {
2124 binaryOp(right,divides<double>());
2125 return (*this);
2126 }
2127 }
2128
2129 Data&
2130 Data::operator/=(const boost::python::object& right)
2131 {
2132 if (isProtected()) {
2133 throw DataException("Error - attempt to update protected Data object.");
2134 }
2135 Data tmp(right,getFunctionSpace(),false);
2136 if (isLazy())
2137 {
2138 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2139 m_data=c->getPtr();
2140 return (*this);
2141 }
2142 else
2143 {
2144 binaryOp(tmp,divides<double>());
2145 return (*this);
2146 }
2147 }
2148
2149 Data
2150 Data::rpowO(const boost::python::object& left) const
2151 {
2152 Data left_d(left,*this);
2153 return left_d.powD(*this);
2154 }
2155
2156 Data
2157 Data::powO(const boost::python::object& right) const
2158 {
2159 Data tmp(right,getFunctionSpace(),false);
2160 return powD(tmp);
2161 }
2162
2163 Data
2164 Data::powD(const Data& right) const
2165 {
2166 if (isLazy() || right.isLazy())
2167 {
2168 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),POW);
2169 return Data(c);
2170 }
2171 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2172 }
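// Usage sketch (illustrative only; 'shape' and 'fs' stand for any valid
// shape/FunctionSpace pair):
//
//   Data base(2.0, shape, fs);
//   Data expo(3.0, shape, fs);
//   Data cube = base.powD(expo);   // every data point becomes pow(2.0,3.0) == 8.0
//
// If either operand is lazy the result is a deferred DataLazy POW node instead
// of being evaluated immediately.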
2173
2174 //
2175 // NOTE: It is essential to specify the namespace this operator belongs to
2176 Data
2177 escript::operator+(const Data& left, const Data& right)
2178 {
2179 if (left.isLazy() || right.isLazy())
2180 {
2181 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),ADD);
2182 return Data(c);
2183 }
2184 return C_TensorBinaryOperation(left, right, plus<double>());
2185 }
2186
2187 //
2188 // NOTE: It is essential to specify the namespace this operator belongs to
2189 Data
2190 escript::operator-(const Data& left, const Data& right)
2191 {
2192 if (left.isLazy() || right.isLazy())
2193 {
2194 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),SUB);
2195 return Data(c);
2196 }
2197 return C_TensorBinaryOperation(left, right, minus<double>());
2198 }
2199
2200 //
2201 // NOTE: It is essential to specify the namespace this operator belongs to
2202 Data
2203 escript::operator*(const Data& left, const Data& right)
2204 {
2205 if (left.isLazy() || right.isLazy())
2206 {
2207 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),MUL);
2208 return Data(c);
2209 }
2210 return C_TensorBinaryOperation(left, right, multiplies<double>());
2211 }
2212
2213 //
2214 // NOTE: It is essential to specify the namespace this operator belongs to
2215 Data
2216 escript::operator/(const Data& left, const Data& right)
2217 {
2218 if (left.isLazy() || right.isLazy())
2219 {
2220 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),DIV);
2221 return Data(c);
2222 }
2223 return C_TensorBinaryOperation(left, right, divides<double>());
2224 }
2225
2226 //
2227 // NOTE: It is essential to specify the namespace this operator belongs to
2228 Data
2229 escript::operator+(const Data& left, const boost::python::object& right)
2230 {
2231 if (left.isLazy())
2232 {
2233 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),ADD);
2234 return Data(c);
2235 }
2236 return left+Data(right,left.getFunctionSpace(),false);
2237 }
2238
2239 //
2240 // NOTE: It is essential to specify the namespace this operator belongs to
2241 Data
2242 escript::operator-(const Data& left, const boost::python::object& right)
2243 {
2244 if (left.isLazy())
2245 {
2246 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),SUB);
2247 return Data(c);
2248 }
2249 return left-Data(right,left.getFunctionSpace(),false);
2250 }
2251
2252 //
2253 // NOTE: It is essential to specify the namespace this operator belongs to
2254 Data
2255 escript::operator*(const Data& left, const boost::python::object& right)
2256 {
2257 if (left.isLazy())
2258 {
2259 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),MUL);
2260 return Data(c);
2261 }
2262 return left*Data(right,left.getFunctionSpace(),false);
2263 }
2264
2265 //
2266 // NOTE: It is essential to specify the namespace this operator belongs to
2267 Data
2268 escript::operator/(const Data& left, const boost::python::object& right)
2269 {
2270 if (left.isLazy())
2271 {
2272 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),DIV);
2273 return Data(c);
2274 }
2275 return left/Data(right,left.getFunctionSpace(),false);
2276 }
2277
2278 //
2279 // NOTE: It is essential to specify the namespace this operator belongs to
2280 Data
2281 escript::operator+(const boost::python::object& left, const Data& right)
2282 {
2283 if (right.isLazy())
2284 {
2285 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),ADD);
2286 return Data(c);
2287 }
2288 return Data(left,right.getFunctionSpace(),false)+right;
2289 }
2290
2291 //
2292 // NOTE: It is essential to specify the namespace this operator belongs to
2293 Data
2294 escript::operator-(const boost::python::object& left, const Data& right)
2295 {
2296 if (right.isLazy())
2297 {
2298 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),SUB);
2299 return Data(c);
2300 }
2301 return Data(left,right.getFunctionSpace(),false)-right;
2302 }
2303
2304 //
2305 // NOTE: It is essential to specify the namespace this operator belongs to
2306 Data
2307 escript::operator*(const boost::python::object& left, const Data& right)
2308 {
2309 if (right.isLazy())
2310 {
2311 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),MUL);
2312 return Data(c);
2313 }
2314 return Data(left,right.getFunctionSpace(),false)*right;
2315 }
2316
2317 //
2318 // NOTE: It is essential to specify the namespace this operator belongs to
2319 Data
2320 escript::operator/(const boost::python::object& left, const Data& right)
2321 {
2322 if (right.isLazy())
2323 {
2324 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),DIV);
2325 return Data(c);
2326 }
2327 return Data(left,right.getFunctionSpace(),false)/right;
2328 }
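// The overloads above that mix Data with a boost::python::object support the
// Python-side arithmetic: the Python operand is first promoted to a Data
// object on the Data operand's FunctionSpace, after which either the
// corresponding Data/Data operator is applied or, if the Data operand is lazy,
// a deferred DataLazy node is built.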
2329
2330
2331 /* TODO */
2332 /* global reduction */
2333 Data
2334 Data::getItem(const boost::python::object& key) const
2335 {
2336
2337 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2338
2339 if (slice_region.size()!=getDataPointRank()) {
2340 throw DataException("Error - slice size does not match Data rank.");
2341 }
2342
2343 return getSlice(slice_region);
2344 }
2345
2346 /* TODO */
2347 /* global reduction */
2348 Data
2349 Data::getSlice(const DataTypes::RegionType& region) const
2350 {
2351 return Data(*this,region);
2352 }
2353
2354 /* TODO */
2355 /* global reduction */
2356 void
2357 Data::setItemO(const boost::python::object& key,
2358 const boost::python::object& value)
2359 {
2360 Data tempData(value,getFunctionSpace());
2361 setItemD(key,tempData);
2362 }
2363
2364 void
2365 Data::setItemD(const boost::python::object& key,
2366 const Data& value)
2367 {
2368 // const DataArrayView& view=getPointDataView();
2369
2370 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2371 if (slice_region.size()!=getDataPointRank()) {
2372 throw DataException("Error - slice size does not match Data rank.");
2373 }
2374 if (getFunctionSpace()!=value.getFunctionSpace()) {
2375 setSlice(Data(value,getFunctionSpace()),slice_region);
2376 } else {
2377 setSlice(value,slice_region);
2378 }
2379 }
2380
2381 void
2382 Data::setSlice(const Data& value,
2383 const DataTypes::RegionType& region)
2384 {
2385 if (isProtected()) {
2386 throw DataException("Error - attempt to update protected Data object.");
2387 }
2388 FORCERESOLVE;
2389 /* if (isLazy())
2390 {
2391 throw DataException("Error - setSlice not permitted on lazy data.");
2392 }*/
2393 Data tempValue(value);
2394 typeMatchLeft(tempValue);
2395 typeMatchRight(tempValue);
2396 getReady()->setSlice(tempValue.m_data.get(),region);
2397 }
2398
2399 void
2400 Data::typeMatchLeft(Data& right) const
2401 {
2402 if (right.isLazy() && !isLazy())
2403 {
2404 right.resolve();
2405 }
2406 if (isExpanded()){
2407 right.expand();
2408 } else if (isTagged()) {
2409 if (right.isConstant()) {
2410 right.tag();
2411 }
2412 }
2413 }
2414
2415 void
2416 Data::typeMatchRight(const Data& right)
2417 {
2418 if (isLazy() && !right.isLazy())
2419 {
2420 resolve();
2421 }
2422 if (isTagged()) {
2423 if (right.isExpanded()) {
2424 expand();
2425 }
2426 } else if (isConstant()) {
2427 if (right.isExpanded()) {
2428 expand();
2429 } else if (right.isTagged()) {
2430 tag();
2431 }
2432 }
2433 }
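// Taken together, typeMatchLeft/typeMatchRight promote the two operands along
// Constant -> Tagged -> Expanded (resolving lazy data first) so that an
// operation such as setSlice sees compatible representations; for example a
// DataConstant combined with a DataExpanded value is expanded beforehand.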
2434
2435 void
2436 Data::setTaggedValueByName(std::string name,
2437 const boost::python::object& value)
2438 {
2439 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2440 FORCERESOLVE;
2441 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2442 setTaggedValue(tagKey,value);
2443 }
2444 }
2445 void
2446 Data::setTaggedValue(int tagKey,
2447 const boost::python::object& value)
2448 {
2449 if (isProtected()) {
2450 throw DataException("Error - attempt to update protected Data object.");
2451 }
2452 //
2453 // Ensure underlying data object is of type DataTagged
2454 FORCERESOLVE;
2455 if (isConstant()) tag();
2456 numeric::array asNumArray(value);
2457
2458 // extract the shape of the numarray
2459 DataTypes::ShapeType tempShape;
2460 for (int i=0; i < asNumArray.getrank(); i++) {
2461 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2462 }
2463
2464 DataVector temp_data2;
2465 temp_data2.copyFromNumArray(asNumArray);
2466
2467 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2468 }
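// The numarray's shape is extracted above and stored together with its values,
// so the tagged value takes on exactly the shape of the object passed in.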
2469
2470
2471 void
2472 Data::setTaggedValueFromCPP(int tagKey,
2473 const DataTypes::ShapeType& pointshape,
2474 const DataTypes::ValueType& value,
2475 int dataOffset)
2476 {
2477 if (isProtected()) {
2478 throw DataException("Error - attempt to update protected Data object.");
2479 }
2480 //
2481 // Ensure underlying data object is of type DataTagged
2482 FORCERESOLVE;
2483 if (isConstant()) tag();
2484 //
2485 // Call DataAbstract::setTaggedValue
2486 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2487 }
2488
2489 int
2490 Data::getTagNumber(int dpno)
2491 {
2492 if (isEmpty())
2493 {
2494 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2495 }
2496 return getFunctionSpace().getTagFromDataPointNo(dpno);
2497 }
2498
2499
2500 ostream& escript::operator<<(ostream& o, const Data& data)
2501 {
2502 o << data.toString();
2503 return o;
2504 }
2505
2506 Data
2507 escript::C_GeneralTensorProduct(Data& arg_0,
2508 Data& arg_1,
2509 int axis_offset,
2510 int transpose)
2511 {
2512 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2513 // SM is the product of the last axis_offset entries in arg_0.getShape().
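 // Worked example (illustrative): for arg_0 of shape (3,4), arg_1 of shape (4,5),
 // axis_offset=1 and transpose=0 the loop sizes computed below are SL=3, SM=4,
 // SR=5 and the result has shape (3,5) -- an ordinary matrix-matrix product.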
2514
2515 // deal with any lazy data
2516 // if (arg_0.isLazy()) {arg_0.resolve();}
2517 // if (arg_1.isLazy()) {arg_1.resolve();}
2518 if (arg_0.isLazy() || arg_1.isLazy())
2519 {
2520 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2521 return Data(c);
2522 }
2523
2524 // Interpolate if necessary and find an appropriate function space
2525 Data arg_0_Z, arg_1_Z;
2526 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2527 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2528 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2529 arg_1_Z = Data(arg_1);
2530 }
2531 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2532 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2533 arg_0_Z =Data(arg_0);
2534 }
2535 else {
2536 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2537 }
2538 } else {
2539 arg_0_Z = Data(arg_0);
2540 arg_1_Z = Data(arg_1);
2541 }
2542 // Get rank and shape of inputs
2543 int rank0 = arg_0_Z.getDataPointRank();
2544 int rank1 = arg_1_Z.getDataPointRank();
2545 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2546 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2547
2548 // Prepare for the loops of the product and verify compatibility of shapes
2549 int start0=0, start1=0;
2550 if (transpose == 0) {}
2551 else if (transpose == 1) { start0 = axis_offset; }
2552 else if (transpose == 2) { start1 = rank1-axis_offset; }
2553 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2554
2555
2556 // Adjust the shapes for transpose
2557 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2558 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2559 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2560 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
2561
2562 #if 0
2563 // For debugging: show shape after transpose
2564 char tmp[100];
2565 std::string shapeStr;
2566 shapeStr = "(";
2567 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2568 shapeStr += ")";
2569 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2570 shapeStr = "(";
2571 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2572 shapeStr += ")";
2573 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2574 #endif
2575
2576 // Prepare for the loops of the product
2577 int SL=1, SM=1, SR=1;
2578 for (int i=0; i<rank0-axis_offset; i++) {
2579 SL *= tmpShape0[i];
2580 }
2581 for (int i=rank0-axis_offset; i<rank0; i++) {
2582 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2583 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2584 }
2585 SM *= tmpShape0[i];
2586 }
2587 for (int i=axis_offset; i<rank1; i++) {
2588 SR *= tmpShape1[i];
2589 }
2590
2591 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2592 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2593 { // block to limit the scope of out_index
2594 int out_index=0;
2595 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2596 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2597 }
2598
2599 // Declare output Data object
2600 Data res;
2601
2602 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2603 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2604 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2605 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2606 double *ptr_2 = &(res.getDataAtOffset(0));
2607 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2608 }
2609 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2610
2611 // Prepare the DataConstant input
2612 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2613 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2614
2615 // Borrow DataTagged input from Data object
2616 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2617 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2618
2619 // Prepare a DataTagged output 2
2620 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2621 res.tag();
2622 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2623 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2624
2625 // Prepare offset into DataConstant
2626 int offset_0 = tmp_0->getPointOffset(0,0);
2627 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2628 // Get the views
2629 // DataArrayView view_1 = tmp_1->getDefaultValue();
2630 // DataArrayView view_2 = tmp_2->getDefaultValue();
2631 // // Get the pointers to the actual data
2632 // double *ptr_1 = &((view_1.getData())[0]);
2633 // double *ptr_2 = &((view_2.getData())[0]);
2634
2635 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2636 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2637
2638
2639 // Compute an MVP for the default
2640 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2641 // Compute an MVP for each tag
2642 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2643 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2644 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2645 tmp_2->addTag(i->first);
2646 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2647 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2648 // double *ptr_1 = &view_1.getData(0);
2649 // double *ptr_2 = &view_2.getData(0);
2650
2651 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2652 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2653
2654 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2655 }
2656
2657 }
2658 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2659
2660 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2661 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2662 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2663 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2664 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2665 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2666 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2667 int sampleNo_1,dataPointNo_1;
2668 int numSamples_1 = arg_1_Z.getNumSamples();
2669 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2670 int offset_0 = tmp_0->getPointOffset(0,0);
2671 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2672 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2673 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2674 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2675 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2676 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2677 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2678 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2679 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2680 }
2681 }
2682
2683 }
2684 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2685
2686 // Borrow DataTagged input from Data object
2687 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2688 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2689
2690 // Prepare the DataConstant input
2691 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2692 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2693
2694 // Prepare a DataTagged output 2
2695 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2696 res.tag();
2697 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2698 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2699
2700 // Prepare offset into DataConstant
2701 int offset_1 = tmp_1->getPointOffset(0,0);
2702 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2703 // Get the views
2704 // DataArrayView view_0 = tmp_0->getDefaultValue();
2705 // DataArrayView view_2 = tmp_2->getDefaultValue();
2706 // // Get the pointers to the actual data
2707 // double *ptr_0 = &((view_0.getData())[0]);
2708 // double *ptr_2 = &((view_2.getData())[0]);
2709
2710 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2711 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2712
2713 // Compute an MVP for the default
2714 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2715 // Compute an MVP for each tag
2716 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2717 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2718 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2719 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2720 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2721 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2722 // double *ptr_0 = &view_0.getData(0);
2723 // double *ptr_2 = &view_2.getData(0);
2724
2725 tmp_2->addTag(i->first);
2726 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2727 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2728 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2729 }
2730
2731 }
2732 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2733
2734 // Borrow DataTagged input from Data object
2735 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2736 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2737
2738 // Borrow DataTagged input from Data object
2739 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2740 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2741
2742 // Prepare a DataTagged output 2
2743 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2744 res.tag(); // DataTagged output
2745 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2746 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2747
2748 // // Get the views
2749 // DataArrayView view_0 = tmp_0->getDefaultValue();
2750 // DataArrayView view_1 = tmp_1->getDefaultValue();
2751 // DataArrayView view_2 = tmp_2->getDefaultValue();
2752 // // Get the pointers to the actual data
2753 // double *ptr_0 = &((view_0.getData())[0]);
2754 // double *ptr_1 = &((view_1.getData())[0]);
2755 // double *ptr_2 = &((view_2.getData())[0]);
2756
2757 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2758 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2759 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2760
2761
2762 // Compute an MVP for the default
2763 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2764 // Merge the tags
2765 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2766 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2767 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2768 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2769 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2770 }
2771 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2772 tmp_2->addTag(i->first);
2773 }
2774 // Compute an MVP for each tag
2775 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2776 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2777 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2778 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2779 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2780 // double *ptr_0 = &view_0.getData(0);
2781 // double *ptr_1 = &view_1.getData(0);
2782 // double *ptr_2 = &view_2.getData(0);
2783
2784 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2785 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2786 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2787
2788 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2789 }
2790
2791 }
2792 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2793
 2794     // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2795 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2796 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2797 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2798 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2799 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2800 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2801 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2802 int sampleNo_0,dataPointNo_0;
2803 int numSamples_0 = arg_0_Z.getNumSamples();
2804 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2805 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2806 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2807 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2808 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2809 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2810 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2811 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2812 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2813 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2814 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2815 }
2816 }
2817
2818 }
2819 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2820
2821 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2822 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2823 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2824 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2825 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2826 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2827 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2828 int sampleNo_0,dataPointNo_0;
2829 int numSamples_0 = arg_0_Z.getNumSamples();
2830 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2831 int offset_1 = tmp_1->getPointOffset(0,0);
2832 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2833 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2834 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2835 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2836 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2837 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2838 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2839 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2840 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2841 }
2842 }
2843
2844
2845 }
2846 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2847
 2848     // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2849 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2850 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2851 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2852 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2853 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2854 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2855 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2856 int sampleNo_0,dataPointNo_0;
2857 int numSamples_0 = arg_0_Z.getNumSamples();
2858 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2859 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2860 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2861 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2862 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2863 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2864 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2865 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2866 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2867 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2868 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2869 }
2870 }
2871
2872 }
2873 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2874
 2875     // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2876 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2877 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2878 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2879 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2880 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2881 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2882 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2883 int sampleNo_0,dataPointNo_0;
2884 int numSamples_0 = arg_0_Z.getNumSamples();
2885 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2886 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2887 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2888 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2889 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2890 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2891 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2892 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2893 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2894 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2895 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2896 }
2897 }
2898
2899 }
2900 else {
2901 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2902 }
2903
2904 return res;
2905 }
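// Usage sketch (illustrative): contracting a rank-2 Data A of shape (m,n) with
// a rank-1 Data x of shape (n,) via
//
//   Data y = C_GeneralTensorProduct(A, x, 1, 0);
//
// yields the matrix-vector product A.x (shape (m,)) at every data point.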
2906
2907 DataAbstract*
2908 Data::borrowData() const
2909 {
2910 return m_data.get();
2911 }
2912
2913 // Not all that happy about returning a non-const from a const
2914 DataAbstract_ptr
2915 Data::borrowDataPtr() const
2916 {
2917 return m_data;
2918 }
2919
2920 // Not all that happy about returning a non-const from a const
2921 DataReady_ptr
2922 Data::borrowReadyPtr() const
2923 {
2924 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2925 EsysAssert((dr!=0), "Error - casting to DataReady.");
2926 return dr;
2927 }
2928
2929 std::string
2930 Data::toString() const
2931 {
2932 if (!m_data->isEmpty() &&
2933 !m_data->isLazy() &&
2934 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2935 {
2936 stringstream temp;
2937 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2938 return temp.str();
2939 }
2940 return m_data->toString();
2941 }
2942
2943
2944
2945 DataTypes::ValueType::const_reference
2946 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2947 {
2948 if (isLazy())
2949 {
2950 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2951 }
2952 return getReady()->getDataAtOffset(i);
2953 }
2954
2955
2956 DataTypes::ValueType::reference
2957 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2958 {
2959 // if (isLazy())
2960 // {
2961 // throw DataException("getDataAtOffset not permitted on lazy data.");
2962 // }
2963 FORCERESOLVE;
2964 return getReady()->getDataAtOffset(i);
2965 }
2966
2967 DataTypes::ValueType::const_reference
2968 Data::getDataPoint(int sampleNo, int dataPointNo) const
2969 {
2970 if (!isReady())
2971 {
2972 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2973 }
2974 else
2975 {
2976 const DataReady* dr=getReady();
2977 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2978 }
2979 }
2980
2981
2982 DataTypes::ValueType::reference
2983 Data::getDataPoint(int sampleNo, int dataPointNo)
2984 {
2985 FORCERESOLVE;
2986 if (!isReady())
2987 {
2988 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2989 }
2990 else
2991 {
2992 DataReady* dr=getReady();
2993 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2994 }
2995 }
2996
2997
2998 /* Member functions specific to the MPI implementation */
2999
3000 void
3001 Data::print()
3002 {
3003 int i,j;
3004
3005 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
3006 for( i=0; i<getNumSamples(); i++ )
3007 {
3008 printf( "[%6d]", i );
3009 for( j=0; j<getNumDataPointsPerSample(); j++ )
3010 printf( "\t%10.7g", (getSampleData(i))[j] );
3011 printf( "\n" );
3012 }
3013 }
3014 void
3015 Data::dump(const std::string fileName) const
3016 {
3017 try
3018 {
3019 if (isLazy())
3020 {
3021 Data temp(*this); // this is to get a non-const object which we can resolve
3022 temp.resolve();
3023 temp.dump(fileName);
3024 }
3025 else
3026 {
3027 return m_data->dump(fileName);
3028 }
3029 }
3030 catch (exception& e)
3031 {
3032 cout << e.what() << endl;
3033 }
3034 }
3035
3036 int
3037 Data::get_MPISize() const
3038 {
3039 int size;
3040 #ifdef PASO_MPI
3041 int error;
3042 error = MPI_Comm_size( get_MPIComm(), &size );
3043 #else
3044 size = 1;
3045 #endif
3046 return size;
3047 }
3048
3049 int
3050 Data::get_MPIRank() const
3051 {
3052 int rank;
3053 #ifdef PASO_MPI
3054 int error;
3055 error = MPI_Comm_rank( get_MPIComm(), &rank );
3056 #else
3057 rank = 0;
3058 #endif
3059 return rank;
3060 }
3061
3062 MPI_Comm
3063 Data::get_MPIComm() const
3064 {
3065 #ifdef PASO_MPI
3066 return MPI_COMM_WORLD;
3067 #else
3068 return -1;
3069 #endif
3070 }
3071
3072
