/[escript]/trunk/escript/src/Data.cpp



Revision 2179 - Thu Dec 18 00:23:55 2008 UTC by caltinay
File size: 81283 byte(s)
Fixed compilation with boost 1.37 (namespace clash).

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41
42 using namespace std;
43 using namespace boost::python;
44 using namespace boost;
45 using namespace escript;
46
47 // Ensure the current object is not a DataLazy.
48 // The idea was that we could add an optional warning whenever a resolve is forced.
49 #define FORCERESOLVE if (isLazy()) {resolve();}
50 #define AUTOLAZYON escriptParams.getAUTOLAZY()
51 #define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
52 {\
53 DataLazy* c=new DataLazy(borrowDataPtr(),X);\
54 return Data(c);\
55 }
56 #define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
57 {\
58 DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
59 return Data(c);\
60 }
61
62 #define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
63 {\
64 DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
65 m_data=c->getPtr();\
66 return (*this);\
67 }
68
69 // Like the above but returns a new Data object rather than *this.
70 #define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
71 {\
72 DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
73 return Data(c);\
74 }
75
76 #define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
77 {\
78 DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
79 return Data(c);\
80 }
81
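// Illustrative expansion (hedged sketch, not part of the build): with auto-lazy
// enabled, a unary method such as Data::sin() starts with MAKELAZYOP(SIN),
// which expands roughly to
//
//   if (isLazy() || (escriptParams.getAUTOLAZY() && m_data->isExpanded()))
//   {
//     DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
//     return Data(c);   // record the operation in an expression tree
//   }
//
// so the work is deferred until resolve() evaluates the tree; otherwise the
// method falls through to the eager C_TensorUnaryOperation path below.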
82 Data::Data()
83 {
84 //
85 // Default data is type DataEmpty
86 DataAbstract* temp=new DataEmpty();
87 m_data=temp->getPtr();
88 m_protected=false;
89 }
90
91 Data::Data(double value,
92 const tuple& shape,
93 const FunctionSpace& what,
94 bool expanded)
95 {
96 DataTypes::ShapeType dataPointShape;
97 for (int i = 0; i < shape.attr("__len__")(); ++i) {
98 dataPointShape.push_back(extract<const int>(shape[i]));
99 }
100
101 int len = DataTypes::noValues(dataPointShape);
102 DataVector temp_data(len,value,len);
103 initialise(temp_data, dataPointShape, what, expanded);
104 m_protected=false;
105 }
106
107 Data::Data(double value,
108 const DataTypes::ShapeType& dataPointShape,
109 const FunctionSpace& what,
110 bool expanded)
111 {
112 int len = DataTypes::noValues(dataPointShape);
113
114 DataVector temp_data(len,value,len);
115 // DataArrayView temp_dataView(temp_data, dataPointShape);
116
117 // initialise(temp_dataView, what, expanded);
118 initialise(temp_data, dataPointShape, what, expanded);
119
120 m_protected=false;
121 }
122
123 Data::Data(const Data& inData)
124 {
125 m_data=inData.m_data;
126 m_protected=inData.isProtected();
127 }
128
129
130 Data::Data(const Data& inData,
131 const DataTypes::RegionType& region)
132 {
133 DataAbstract_ptr dat=inData.m_data;
134 if (inData.isLazy())
135 {
136 dat=inData.m_data->resolve();
137 }
138 else
139 {
140 dat=inData.m_data;
141 }
142 //
143 // Create Data which is a slice of another Data
144 DataAbstract* tmp = dat->getSlice(region);
145 m_data=DataAbstract_ptr(tmp);
146 m_protected=false;
147 }
148
149 Data::Data(const Data& inData,
150 const FunctionSpace& functionspace)
151 {
152 if (inData.isEmpty())
153 {
154 throw DataException("Error - will not interpolate for instances of DataEmpty.");
155 }
156 if (inData.getFunctionSpace()==functionspace) {
157 m_data=inData.m_data;
158 }
159 else
160 {
161
162 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
163 if (!inData.probeInterpolation(functionspace))
164 { // Even though this is constant, we still need to check whether interpolation is allowed
165 throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
166 }
167 // if the data is not lazy, this will just be a cast to DataReady
168 DataReady_ptr dr=inData.m_data->resolve();
169 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
170 m_data=DataAbstract_ptr(dc);
171 } else {
172 Data tmp(0,inData.getDataPointShape(),functionspace,true);
173 // Note: Must use a reference or pointer to a derived object
174 // in order to get polymorphic behaviour. Shouldn't really
175 // be able to create an instance of AbstractDomain but that was done
176 // as a boost::python workaround which may no longer be required.
177 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
178 const_Domain_ptr inDataDomain=inData.getDomain();
179 if (inDataDomain==functionspace.getDomain()) {
180 inDataDomain->interpolateOnDomain(tmp,inData);
181 } else {
182 inDataDomain->interpolateACross(tmp,inData);
183 }
184 m_data=tmp.m_data;
185 }
186 }
187 m_protected=false;
188 }
189
190 Data::Data(DataAbstract* underlyingdata)
191 {
192 // m_data=shared_ptr<DataAbstract>(underlyingdata);
193 m_data=underlyingdata->getPtr();
194 m_protected=false;
195 }
196
197 Data::Data(DataAbstract_ptr underlyingdata)
198 {
199 m_data=underlyingdata;
200 m_protected=false;
201 }
202
203
204 Data::Data(const numeric::array& value,
205 const FunctionSpace& what,
206 bool expanded)
207 {
208 initialise(value,what,expanded);
209 m_protected=false;
210 }
211
212 Data::Data(const DataTypes::ValueType& value,
213 const DataTypes::ShapeType& shape,
214 const FunctionSpace& what,
215 bool expanded)
216 {
217 initialise(value,shape,what,expanded);
218 m_protected=false;
219 }
220
221
222 Data::Data(const object& value,
223 const FunctionSpace& what,
224 bool expanded)
225 {
226 numeric::array asNumArray(value);
227 initialise(asNumArray,what,expanded);
228 m_protected=false;
229 }
230
231
232 Data::Data(const object& value,
233 const Data& other)
234 {
235 numeric::array asNumArray(value);
236
237 // extract the shape of the numarray
238 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
239 if (DataTypes::getRank(tempShape)==0) {
240
241
242 // get the space for the data vector
243 int len1 = DataTypes::noValues(tempShape);
244 DataVector temp_data(len1, 0.0, len1);
245 temp_data.copyFromNumArray(asNumArray,1);
246
247 int len = DataTypes::noValues(other.getDataPointShape());
248
249 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
250 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
251 m_data=DataAbstract_ptr(t);
252
253 } else {
254 //
255 // Create a DataConstant with the same sample shape as other
256 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
257 // boost::shared_ptr<DataAbstract> sp(t);
258 // m_data=sp;
259 m_data=DataAbstract_ptr(t);
260 }
261 m_protected=false;
262 }
263
264 Data::~Data()
265 {
266
267 }
268
269
270
271 void
272 Data::initialise(const boost::python::numeric::array& value,
273 const FunctionSpace& what,
274 bool expanded)
275 {
276 //
277 // Construct a Data object of the appropriate type.
278 // Construct the object first as there seems to be a bug which causes
279 // undefined behaviour if an exception is thrown during construction
280 // within the shared_ptr constructor.
281 if (expanded) {
282 DataAbstract* temp=new DataExpanded(value, what);
283 // boost::shared_ptr<DataAbstract> temp_data(temp);
284 // m_data=temp_data;
285 m_data=temp->getPtr();
286 } else {
287 DataAbstract* temp=new DataConstant(value, what);
288 // boost::shared_ptr<DataAbstract> temp_data(temp);
289 // m_data=temp_data;
290 m_data=temp->getPtr();
291 }
292 }
293
294
295 void
296 Data::initialise(const DataTypes::ValueType& value,
297 const DataTypes::ShapeType& shape,
298 const FunctionSpace& what,
299 bool expanded)
300 {
301 //
302 // Construct a Data object of the appropriate type.
303 // Construct the object first as there seems to be a bug which causes
304 // undefined behaviour if an exception is thrown during construction
305 // within the shared_ptr constructor.
306 if (expanded) {
307 DataAbstract* temp=new DataExpanded(what, shape, value);
308 // boost::shared_ptr<DataAbstract> temp_data(temp);
309 // m_data=temp_data;
310 m_data=temp->getPtr();
311 } else {
312 DataAbstract* temp=new DataConstant(what, shape, value);
313 // boost::shared_ptr<DataAbstract> temp_data(temp);
314 // m_data=temp_data;
315 m_data=temp->getPtr();
316 }
317 }
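// A minimal sketch of the construct-then-assign pattern the comments above
// describe (illustration only; whether the reported shared_ptr issue still
// exists has not been re-verified here):
//
//   m_data = DataAbstract_ptr(new DataExpanded(what, shape, value));  // form avoided
//
//   DataAbstract* temp = new DataExpanded(what, shape, value);        // form used:
//   m_data = temp->getPtr();                                          // construct first, then assign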
318
319
320 // void
321 // Data::CompareDebug(const Data& rd)
322 // {
323 // using namespace std;
324 // bool mismatch=false;
325 // std::cout << "Comparing left and right" << endl;
326 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
327 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
328 //
329 // if (left==0)
330 // {
331 // cout << "left arg is not a DataTagged\n";
332 // return;
333 // }
334 //
335 // if (right==0)
336 // {
337 // cout << "right arg is not a DataTagged\n";
338 // return;
339 // }
340 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
341 // cout << "Shapes ";
342 // if (left->getShape()==right->getShape())
343 // {
344 // cout << "ok\n";
345 // }
346 // else
347 // {
348 // cout << "Problem: shapes do not match\n";
349 // mismatch=true;
350 // }
351 // int lim=left->getVector().size();
352 // if (right->getVector().size()) lim=right->getVector().size();
353 // for (int i=0;i<lim;++i)
354 // {
355 // if (left->getVector()[i]!=right->getVector()[i])
356 // {
357 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
358 // mismatch=true;
359 // }
360 // }
361 //
362 // // still need to check the tag map
363 // // also need to watch what is happening to function spaces, are they copied or what?
364 //
365 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
366 // const DataTagged::DataMapType& mapright=right->getTagLookup();
367 //
368 // if (mapleft.size()!=mapright.size())
369 // {
370 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
371 // mismatch=true;
372 // cout << "Left map\n";
373 // DataTagged::DataMapType::const_iterator i,j;
374 // for (i=mapleft.begin();i!=mapleft.end();++i) {
375 // cout << "(" << i->first << "=>" << i->second << ")\n";
376 // }
377 // cout << "Right map\n";
378 // for (i=mapright.begin();i!=mapright.end();++i) {
379 // cout << "(" << i->first << "=>" << i->second << ")\n";
380 // }
381 // cout << "End map\n";
382 //
383 // }
384 //
385 // DataTagged::DataMapType::const_iterator i,j;
386 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
387 // if ((i->first!=j->first) || (i->second!=j->second))
388 // {
389 // cout << "(" << i->first << "=>" << i->second << ")";
390 // cout << ":(" << j->first << "=>" << j->second << ") ";
391 // mismatch=true;
392 // }
393 // }
394 // if (mismatch)
395 // {
396 // cout << "#Mismatch\n";
397 // }
398 // }
399
400 escriptDataC
401 Data::getDataC()
402 {
403 escriptDataC temp;
404 temp.m_dataPtr=(void*)this;
405 return temp;
406 }
407
408 escriptDataC
409 Data::getDataC() const
410 {
411 escriptDataC temp;
412 temp.m_dataPtr=(void*)this;
413 return temp;
414 }
415
416 const boost::python::tuple
417 Data::getShapeTuple() const
418 {
419 const DataTypes::ShapeType& shape=getDataPointShape();
420 switch(getDataPointRank()) {
421 case 0:
422 return make_tuple();
423 case 1:
424 return make_tuple(long_(shape[0]));
425 case 2:
426 return make_tuple(long_(shape[0]),long_(shape[1]));
427 case 3:
428 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
429 case 4:
430 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
431 default:
432 throw DataException("Error - illegal Data rank.");
433 }
434 }
435
436
437 // The different name is needed because boost has trouble with overloaded functions.
438 // It can't work out what type the function is based solely on its name.
439 // There are ways to fix this involving creating function pointer variables for each form
440 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
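// A sketch of the function-pointer alternative mentioned above (hypothetical
// variable names, not used in this file): the two overloads could be exported
// to Python by binding each form explicitly, e.g.
//
//   Data (Data::*copy_new)()              = &Data::copySelf;
//   void (Data::*copy_from)(const Data&)  = &Data::copy;
//   // ...and then .def("copy", copy_new) / .def("copy", copy_from)
//   // in the boost::python wrapper.
//
// Giving the no-argument form its own C++ name (copySelf) sidesteps this.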
441 Data
442 Data::copySelf()
443 {
444 DataAbstract* temp=m_data->deepCopy();
445 return Data(temp);
446 }
447
448 void
449 Data::copy(const Data& other)
450 {
451 DataAbstract* temp=other.m_data->deepCopy();
452 DataAbstract_ptr p=temp->getPtr();
453 m_data=p;
454 }
455
456
457 Data
458 Data::delay()
459 {
460 DataLazy* dl=new DataLazy(m_data);
461 return Data(dl);
462 }
463
464 void
465 Data::delaySelf()
466 {
467 if (!isLazy())
468 {
469 m_data=(new DataLazy(m_data))->getPtr();
470 }
471 }
472
473 void
474 Data::setToZero()
475 {
476 if (isEmpty())
477 {
478 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
479 }
480 m_data->setToZero();
481 }
482
483 void
484 Data::copyWithMask(const Data& other,
485 const Data& mask)
486 {
487 // 1. Interpolate if required so all Data arguments use the same FunctionSpace as this object
488 // 2. Tag or Expand so that all Data arguments have the same underlying type
489 // 3. Iterate over the data vectors, copying values where the mask is >0
490 if (other.isEmpty() || mask.isEmpty())
491 {
492 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
493 }
494 Data other2(other);
495 Data mask2(mask);
496 other2.resolve();
497 mask2.resolve();
498 this->resolve();
499 FunctionSpace myFS=getFunctionSpace();
500 FunctionSpace oFS=other2.getFunctionSpace();
501 FunctionSpace mFS=mask2.getFunctionSpace();
502 if (oFS!=myFS)
503 {
504 if (other2.probeInterpolation(myFS))
505 {
506 other2=other2.interpolate(myFS);
507 }
508 else
509 {
510 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
511 }
512 }
513 if (mFS!=myFS)
514 {
515 if (mask2.probeInterpolation(myFS))
516 {
517 mask2=mask2.interpolate(myFS);
518 }
519 else
520 {
521 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
522 }
523 }
524 // Ensure that all args have the same type
525 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
526 {
527 this->expand();
528 other2.expand();
529 mask2.expand();
530 }
531 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
532 {
533 this->tag();
534 other2.tag();
535 mask2.tag();
536 }
537 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
538 {
539 }
540 else
541 {
542 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
543 }
544 // Now we iterate over the elements
545 DataVector& self=getReadyPtr()->getVector();
546 const DataVector& ovec=other2.getReadyPtr()->getVector();
547 const DataVector& mvec=mask2.getReadyPtr()->getVector();
548 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
549 {
550 throw DataException("Error - size mismatch in arguments to copyWithMask.");
551 }
552 size_t num_points=self.size();
553
554 // OPENMP 3.0 allows unsigned loop vars.
555 #if defined(_OPENMP) && (_OPENMP < 200805)
556 long i;
557 #else
558 size_t i;
559 #endif
560 #pragma omp parallel for private(i) schedule(static)
561 for (i=0;i<num_points;++i)
562 {
563 if (mvec[i]>0)
564 {
565 self[i]=ovec[i];
566 }
567 }
568 }
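// Typical use (descriptive sketch): for Data objects d, source and mask on
// compatible FunctionSpaces, d.copyWithMask(source, mask) overwrites d's value
// at every position where the corresponding mask value is > 0 and leaves all
// other values unchanged; as implemented above, the arguments are first
// resolved and promoted to a common representation (constant/tagged/expanded)
// so the element-wise copy is well defined.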
569
570
571
572 bool
573 Data::isExpanded() const
574 {
575 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
576 return (temp!=0);
577 }
578
579 bool
580 Data::isTagged() const
581 {
582 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
583 return (temp!=0);
584 }
585
586 bool
587 Data::isEmpty() const
588 {
589 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
590 return (temp!=0);
591 }
592
593 bool
594 Data::isConstant() const
595 {
596 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
597 return (temp!=0);
598 }
599
600 bool
601 Data::isLazy() const
602 {
603 return m_data->isLazy();
604 }
605
606 // at the moment this is synonymous with !isLazy() but that could change
607 bool
608 Data::isReady() const
609 {
610 return (dynamic_cast<DataReady*>(m_data.get())!=0);
611 }
612
613
614 void
615 Data::setProtection()
616 {
617 m_protected=true;
618 }
619
620 bool
621 Data::isProtected() const
622 {
623 return m_protected;
624 }
625
626
627
628 void
629 Data::expand()
630 {
631 if (isConstant()) {
632 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
633 DataAbstract* temp=new DataExpanded(*tempDataConst);
634 // shared_ptr<DataAbstract> temp_data(temp);
635 // m_data=temp_data;
636 m_data=temp->getPtr();
637 } else if (isTagged()) {
638 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
639 DataAbstract* temp=new DataExpanded(*tempDataTag);
640 // shared_ptr<DataAbstract> temp_data(temp);
641 // m_data=temp_data;
642 m_data=temp->getPtr();
643 } else if (isExpanded()) {
644 //
645 // do nothing
646 } else if (isEmpty()) {
647 throw DataException("Error - Expansion of DataEmpty not possible.");
648 } else if (isLazy()) {
649 resolve();
650 expand(); // resolve might not give us expanded data
651 } else {
652 throw DataException("Error - Expansion not implemented for this Data type.");
653 }
654 }
655
656 void
657 Data::tag()
658 {
659 if (isConstant()) {
660 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
661 DataAbstract* temp=new DataTagged(*tempDataConst);
662 // shared_ptr<DataAbstract> temp_data(temp);
663 // m_data=temp_data;
664 m_data=temp->getPtr();
665 } else if (isTagged()) {
666 // do nothing
667 } else if (isExpanded()) {
668 throw DataException("Error - Creating tag data from DataExpanded not possible.");
669 } else if (isEmpty()) {
670 throw DataException("Error - Creating tag data from DataEmpty not possible.");
671 } else if (isLazy()) {
672 DataAbstract_ptr res=m_data->resolve();
673 if (m_data->isExpanded())
674 {
675 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
676 }
677 m_data=res;
678 tag();
679 } else {
680 throw DataException("Error - Tagging not implemented for this Data type.");
681 }
682 }
683
684 void
685 Data::resolve()
686 {
687 if (isLazy())
688 {
689 m_data=m_data->resolve();
690 }
691 }
692
693
694 Data
695 Data::oneOver() const
696 {
697 MAKELAZYOP(RECIP)
698 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
699 }
700
701 Data
702 Data::wherePositive() const
703 {
704 MAKELAZYOP(GZ)
705 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
706 }
707
708 Data
709 Data::whereNegative() const
710 {
711 MAKELAZYOP(LZ)
712 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
713 }
714
715 Data
716 Data::whereNonNegative() const
717 {
718 MAKELAZYOP(GEZ)
719 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
720 }
721
722 Data
723 Data::whereNonPositive() const
724 {
725 MAKELAZYOP(LEZ)
726 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
727 }
728
729 Data
730 Data::whereZero(double tol) const
731 {
732 // Data dataAbs=abs();
733 // return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
734 MAKELAZYOPOFF(EZ,tol)
735 return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
736
737 }
738
739 Data
740 Data::whereNonZero(double tol) const
741 {
742 // Data dataAbs=abs();
743 // return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
744 MAKELAZYOPOFF(NEZ,tol)
745 return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
746
747 }
748
749 Data
750 Data::interpolate(const FunctionSpace& functionspace) const
751 {
752 return Data(*this,functionspace);
753 }
754
755 bool
756 Data::probeInterpolation(const FunctionSpace& functionspace) const
757 {
758 return getFunctionSpace().probeInterpolation(functionspace);
759 }
760
761 Data
762 Data::gradOn(const FunctionSpace& functionspace) const
763 {
764 if (isEmpty())
765 {
766 throw DataException("Error - operation not permitted on instances of DataEmpty.");
767 }
768 double blocktimer_start = blocktimer_time();
769 if (functionspace.getDomain()!=getDomain())
770 throw DataException("Error - gradient cannot be calculated on different domains.");
771 DataTypes::ShapeType grad_shape=getDataPointShape();
772 grad_shape.push_back(functionspace.getDim());
773 Data out(0.0,grad_shape,functionspace,true);
774 getDomain()->setToGradient(out,*this);
775 blocktimer_increment("grad()", blocktimer_start);
776 return out;
777 }
778
779 Data
780 Data::grad() const
781 {
782 if (isEmpty())
783 {
784 throw DataException("Error - operation not permitted on instances of DataEmpty.");
785 }
786 return gradOn(escript::function(*getDomain()));
787 }
788
789 int
790 Data::getDataPointSize() const
791 {
792 return m_data->getNoValues();
793 }
794
795 DataTypes::ValueType::size_type
796 Data::getLength() const
797 {
798 return m_data->getLength();
799 }
800
801 const
802 boost::python::numeric::array
803 Data:: getValueOfDataPoint(int dataPointNo)
804 {
805 int i, j, k, l;
806
807 FORCERESOLVE;
808
809 //
810 // determine the rank and shape of each data point
811 int dataPointRank = getDataPointRank();
812 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
813
814 //
815 // create the numeric array to be returned
816 boost::python::numeric::array numArray(0.0);
817
818 //
819 // the shape of the returned numeric array will be the same
820 // as that of the data point
821 int arrayRank = dataPointRank;
822 const DataTypes::ShapeType& arrayShape = dataPointShape;
823
824 //
825 // resize the numeric array to the shape just calculated
826 if (arrayRank==0) {
827 numArray.resize(1);
828 }
829 if (arrayRank==1) {
830 numArray.resize(arrayShape[0]);
831 }
832 if (arrayRank==2) {
833 numArray.resize(arrayShape[0],arrayShape[1]);
834 }
835 if (arrayRank==3) {
836 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
837 }
838 if (arrayRank==4) {
839 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
840 }
841
842 if (getNumDataPointsPerSample()>0) {
843 int sampleNo = dataPointNo/getNumDataPointsPerSample();
844 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
845 //
846 // Check a valid sample number has been supplied
847 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
848 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
849 }
850
851 //
852 // Check a valid data point number has been supplied
853 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
854 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
855 }
856 // TODO: global error handling
857 // create a view of the data if it is stored locally
858 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
859 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
860
861
862 switch( dataPointRank ){
863 case 0 :
864 numArray[0] = getDataAtOffset(offset);
865 break;
866 case 1 :
867 for( i=0; i<dataPointShape[0]; i++ )
868 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
869 break;
870 case 2 :
871 for( i=0; i<dataPointShape[0]; i++ )
872 for( j=0; j<dataPointShape[1]; j++)
873 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
874 break;
875 case 3 :
876 for( i=0; i<dataPointShape[0]; i++ )
877 for( j=0; j<dataPointShape[1]; j++ )
878 for( k=0; k<dataPointShape[2]; k++)
879 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
880 break;
881 case 4 :
882 for( i=0; i<dataPointShape[0]; i++ )
883 for( j=0; j<dataPointShape[1]; j++ )
884 for( k=0; k<dataPointShape[2]; k++ )
885 for( l=0; l<dataPointShape[3]; l++)
886 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
887 break;
888 }
889 }
890 //
891 // return the array
892 return numArray;
893 }
894
895 void
896 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
897 {
898 // this will throw if the value cannot be represented
899 boost::python::numeric::array num_array(py_object);
900 setValueOfDataPointToArray(dataPointNo,num_array);
901 }
902
903 void
904 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
905 {
906 if (isProtected()) {
907 throw DataException("Error - attempt to update protected Data object.");
908 }
909 FORCERESOLVE;
910 //
911 // check rank
912 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
913 throw DataException("Rank of numarray does not match Data object rank");
914
915 //
916 // check shape of num_array
917 for (unsigned int i=0; i<getDataPointRank(); i++) {
918 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
919 throw DataException("Shape of numarray does not match Data object rank");
920 }
921 //
922 // make sure data is expanded:
923 //
924 if (!isExpanded()) {
925 expand();
926 }
927 if (getNumDataPointsPerSample()>0) {
928 int sampleNo = dataPointNo/getNumDataPointsPerSample();
929 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
930 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
931 } else {
932 m_data->copyToDataPoint(-1, 0,num_array);
933 }
934 }
935
936 void
937 Data::setValueOfDataPoint(int dataPointNo, const double value)
938 {
939 if (isProtected()) {
940 throw DataException("Error - attempt to update protected Data object.");
941 }
942 //
943 // make sure data is expanded:
944 FORCERESOLVE;
945 if (!isExpanded()) {
946 expand();
947 }
948 if (getNumDataPointsPerSample()>0) {
949 int sampleNo = dataPointNo/getNumDataPointsPerSample();
950 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
951 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
952 } else {
953 m_data->copyToDataPoint(-1, 0,value);
954 }
955 }
956
957 const
958 boost::python::numeric::array
959 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
960 {
961 size_t length=0;
962 int i, j, k, l, pos;
963 FORCERESOLVE;
964 //
965 // determine the rank and shape of each data point
966 int dataPointRank = getDataPointRank();
967 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
968
969 //
970 // create the numeric array to be returned
971 boost::python::numeric::array numArray(0.0);
972
973 //
974 // the shape of the returned numeric array will be the same
975 // as that of the data point
976 int arrayRank = dataPointRank;
977 const DataTypes::ShapeType& arrayShape = dataPointShape;
978
979 //
980 // resize the numeric array to the shape just calculated
981 if (arrayRank==0) {
982 numArray.resize(1);
983 }
984 if (arrayRank==1) {
985 numArray.resize(arrayShape[0]);
986 }
987 if (arrayRank==2) {
988 numArray.resize(arrayShape[0],arrayShape[1]);
989 }
990 if (arrayRank==3) {
991 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
992 }
993 if (arrayRank==4) {
994 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
995 }
996
997 // added for the MPI communication
998 length=1;
999 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1000 double *tmpData = new double[length];
1001
1002 //
1003 // load the values for the data point into the numeric array.
1004
1005 // updated for the MPI case
1006 if( get_MPIRank()==procNo ){
1007 if (getNumDataPointsPerSample()>0) {
1008 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1009 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1010 //
1011 // Check a valid sample number has been supplied
1012 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1013 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1014 }
1015
1016 //
1017 // Check a valid data point number has been supplied
1018 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1019 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1020 }
1021 // TODO: global error handling
1022 // create a view of the data if it is stored locally
1023 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1024 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1025
1026 // pack the data from the view into tmpData for MPI communication
1027 pos=0;
1028 switch( dataPointRank ){
1029 case 0 :
1030 tmpData[0] = getDataAtOffset(offset);
1031 break;
1032 case 1 :
1033 for( i=0; i<dataPointShape[0]; i++ )
1034 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1035 break;
1036 case 2 :
1037 for( i=0; i<dataPointShape[0]; i++ )
1038 for( j=0; j<dataPointShape[1]; j++, pos++ )
1039 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1040 break;
1041 case 3 :
1042 for( i=0; i<dataPointShape[0]; i++ )
1043 for( j=0; j<dataPointShape[1]; j++ )
1044 for( k=0; k<dataPointShape[2]; k++, pos++ )
1045 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1046 break;
1047 case 4 :
1048 for( i=0; i<dataPointShape[0]; i++ )
1049 for( j=0; j<dataPointShape[1]; j++ )
1050 for( k=0; k<dataPointShape[2]; k++ )
1051 for( l=0; l<dataPointShape[3]; l++, pos++ )
1052 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1053 break;
1054 }
1055 }
1056 }
1057 #ifdef PASO_MPI
1058 // broadcast the data to all other processes
1059 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1060 #endif
1061
1062 // unpack the data
1063 switch( dataPointRank ){
1064 case 0 :
1065 numArray[0]=tmpData[0];
1066 break;
1067 case 1 :
1068 for( i=0; i<dataPointShape[0]; i++ )
1069 numArray[i]=tmpData[i];
1070 break;
1071 case 2 :
1072 for( i=0; i<dataPointShape[0]; i++ )
1073 for( j=0; j<dataPointShape[1]; j++ )
1074 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1075 break;
1076 case 3 :
1077 for( i=0; i<dataPointShape[0]; i++ )
1078 for( j=0; j<dataPointShape[1]; j++ )
1079 for( k=0; k<dataPointShape[2]; k++ )
1080 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1081 break;
1082 case 4 :
1083 for( i=0; i<dataPointShape[0]; i++ )
1084 for( j=0; j<dataPointShape[1]; j++ )
1085 for( k=0; k<dataPointShape[2]; k++ )
1086 for( l=0; l<dataPointShape[3]; l++ )
1087 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1088 break;
1089 }
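// The unpacking above flattens each data point with the first index varying
// fastest: element (i,j,k,l) of a point with shape (s0,s1,s2,s3) is read from
// linear position i + s0*(j + s1*(k + s2*l)). Worked example (sketch): for a
// rank-2 point of shape (2,3), element (i,j) comes from tmpData[i+2*j], i.e.
// the order (0,0),(1,0),(0,1),(1,1),(0,2),(1,2).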
1090
1091 delete [] tmpData;
1092 //
1093 // return the loaded array
1094 return numArray;
1095 }
1096
1097
1098 boost::python::numeric::array
1099 Data::integrate_const() const
1100 {
1101 if (isLazy())
1102 {
1103 throw DataException("Error - cannot integrate for constant lazy data.");
1104 }
1105 return integrateWorker();
1106 }
1107
1108 boost::python::numeric::array
1109 Data::integrate()
1110 {
1111 if (isLazy())
1112 {
1113 expand();
1114 }
1115 return integrateWorker();
1116 }
1117
1118
1119
1120 boost::python::numeric::array
1121 Data::integrateWorker() const
1122 {
1123 int index;
1124 int rank = getDataPointRank();
1125 DataTypes::ShapeType shape = getDataPointShape();
1126 int dataPointSize = getDataPointSize();
1127
1128 //
1129 // calculate the integral values
1130 vector<double> integrals(dataPointSize);
1131 vector<double> integrals_local(dataPointSize);
1132 const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
1133 if (dom==0)
1134 {
1135 throw DataException("Can not integrate over non-continuous domains.");
1136 }
1137 #ifdef PASO_MPI
1138 dom->setToIntegrals(integrals_local,*this);
1139 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1140 double *tmp = new double[dataPointSize];
1141 double *tmp_local = new double[dataPointSize];
1142 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1143 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1144 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1145 delete[] tmp;
1146 delete[] tmp_local;
1147 #else
1148 dom->setToIntegrals(integrals,*this);
1149 #endif
1150
1151 //
1152 // create the numeric array to be returned
1153 // and load the array with the integral values
1154 boost::python::numeric::array bp_array(1.0);
1155 if (rank==0) {
1156 bp_array.resize(1);
1157 index = 0;
1158 bp_array[0] = integrals[index];
1159 }
1160 if (rank==1) {
1161 bp_array.resize(shape[0]);
1162 for (int i=0; i<shape[0]; i++) {
1163 index = i;
1164 bp_array[i] = integrals[index];
1165 }
1166 }
1167 if (rank==2) {
1168 bp_array.resize(shape[0],shape[1]);
1169 for (int i=0; i<shape[0]; i++) {
1170 for (int j=0; j<shape[1]; j++) {
1171 index = i + shape[0] * j;
1172 bp_array[make_tuple(i,j)] = integrals[index];
1173 }
1174 }
1175 }
1176 if (rank==3) {
1177 bp_array.resize(shape[0],shape[1],shape[2]);
1178 for (int i=0; i<shape[0]; i++) {
1179 for (int j=0; j<shape[1]; j++) {
1180 for (int k=0; k<shape[2]; k++) {
1181 index = i + shape[0] * ( j + shape[1] * k );
1182 bp_array[make_tuple(i,j,k)] = integrals[index];
1183 }
1184 }
1185 }
1186 }
1187 if (rank==4) {
1188 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1189 for (int i=0; i<shape[0]; i++) {
1190 for (int j=0; j<shape[1]; j++) {
1191 for (int k=0; k<shape[2]; k++) {
1192 for (int l=0; l<shape[3]; l++) {
1193 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1194 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1195 }
1196 }
1197 }
1198 }
1199 }
1200
1201 //
1202 // return the loaded array
1203 return bp_array;
1204 }
1205
1206 Data
1207 Data::sin() const
1208 {
1209 MAKELAZYOP(SIN)
1210 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1211 }
1212
1213 Data
1214 Data::cos() const
1215 {
1216 MAKELAZYOP(COS)
1217 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1218 }
1219
1220 Data
1221 Data::tan() const
1222 {
1223 MAKELAZYOP(TAN)
1224 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1225 }
1226
1227 Data
1228 Data::asin() const
1229 {
1230 MAKELAZYOP(ASIN)
1231 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1232 }
1233
1234 Data
1235 Data::acos() const
1236 {
1237 MAKELAZYOP(ACOS)
1238 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1239 }
1240
1241
1242 Data
1243 Data::atan() const
1244 {
1245 MAKELAZYOP(ATAN)
1246 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1247 }
1248
1249 Data
1250 Data::sinh() const
1251 {
1252 MAKELAZYOP(SINH)
1253 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1254 }
1255
1256 Data
1257 Data::cosh() const
1258 {
1259 MAKELAZYOP(COSH)
1260 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1261 }
1262
1263 Data
1264 Data::tanh() const
1265 {
1266 MAKELAZYOP(TANH)
1267 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1268 }
1269
1270
1271 Data
1272 Data::erf() const
1273 {
1274 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1275 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1276 #else
1277 MAKELAZYOP(ERF)
1278 return C_TensorUnaryOperation(*this, ::erf);
1279 #endif
1280 }
1281
1282 Data
1283 Data::asinh() const
1284 {
1285 MAKELAZYOP(ASINH)
1286 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1287 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1288 #else
1289 return C_TensorUnaryOperation(*this, ::asinh);
1290 #endif
1291 }
1292
1293 Data
1294 Data::acosh() const
1295 {
1296 MAKELAZYOP(ACOSH)
1297 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1298 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1299 #else
1300 return C_TensorUnaryOperation(*this, ::acosh);
1301 #endif
1302 }
1303
1304 Data
1305 Data::atanh() const
1306 {
1307 MAKELAZYOP(ATANH)
1308 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1309 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1310 #else
1311 return C_TensorUnaryOperation(*this, ::atanh);
1312 #endif
1313 }
1314
1315 Data
1316 Data::log10() const
1317 {
1318 MAKELAZYOP(LOG10)
1319 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1320 }
1321
1322 Data
1323 Data::log() const
1324 {
1325 MAKELAZYOP(LOG)
1326 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1327 }
1328
1329 Data
1330 Data::sign() const
1331 {
1332 MAKELAZYOP(SIGN)
1333 return C_TensorUnaryOperation(*this, escript::fsign);
1334 }
1335
1336 Data
1337 Data::abs() const
1338 {
1339 MAKELAZYOP(ABS)
1340 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1341 }
1342
1343 Data
1344 Data::neg() const
1345 {
1346 MAKELAZYOP(NEG)
1347 return C_TensorUnaryOperation(*this, negate<double>());
1348 }
1349
1350 Data
1351 Data::pos() const
1352 {
1353 // not doing lazy check here is deliberate.
1354 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1355 Data result;
1356 // perform a deep copy
1357 result.copy(*this);
1358 return result;
1359 }
1360
1361 Data
1362 Data::exp() const
1363 {
1364 MAKELAZYOP(EXP)
1365 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1366 }
1367
1368 Data
1369 Data::sqrt() const
1370 {
1371 MAKELAZYOP(SQRT)
1372 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1373 }
1374
1375 double
1376 Data::Lsup_const() const
1377 {
1378 if (isLazy())
1379 {
1380 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1381 }
1382 return LsupWorker();
1383 }
1384
1385 double
1386 Data::Lsup()
1387 {
1388 if (isLazy())
1389 {
1390 resolve();
1391 }
1392 return LsupWorker();
1393 }
1394
1395 double
1396 Data::sup_const() const
1397 {
1398 if (isLazy())
1399 {
1400 throw DataException("Error - cannot compute sup for constant lazy data.");
1401 }
1402 return supWorker();
1403 }
1404
1405 double
1406 Data::sup()
1407 {
1408 if (isLazy())
1409 {
1410 resolve();
1411 }
1412 return supWorker();
1413 }
1414
1415 double
1416 Data::inf_const() const
1417 {
1418 if (isLazy())
1419 {
1420 throw DataException("Error - cannot compute inf for constant lazy data.");
1421 }
1422 return infWorker();
1423 }
1424
1425 double
1426 Data::inf()
1427 {
1428 if (isLazy())
1429 {
1430 resolve();
1431 }
1432 return infWorker();
1433 }
1434
1435 double
1436 Data::LsupWorker() const
1437 {
1438 double localValue;
1439 //
1440 // set the initial absolute maximum value to zero
1441
1442 AbsMax abs_max_func;
1443 localValue = algorithm(abs_max_func,0);
1444 #ifdef PASO_MPI
1445 double globalValue;
1446 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1447 return globalValue;
1448 #else
1449 return localValue;
1450 #endif
1451 }
1452
1453 double
1454 Data::supWorker() const
1455 {
1456 double localValue;
1457 //
1458 // set the initial maximum value to min possible double
1459 FMax fmax_func;
1460 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1461 #ifdef PASO_MPI
1462 double globalValue;
1463 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1464 return globalValue;
1465 #else
1466 return localValue;
1467 #endif
1468 }
1469
1470 double
1471 Data::infWorker() const
1472 {
1473 double localValue;
1474 //
1475 // set the initial minimum value to max possible double
1476 FMin fmin_func;
1477 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1478 #ifdef PASO_MPI
1479 double globalValue;
1480 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1481 return globalValue;
1482 #else
1483 return localValue;
1484 #endif
1485 }
1486
1487 /* TODO */
1488 /* global reduction */
1489 Data
1490 Data::maxval() const
1491 {
1492 if (isLazy())
1493 {
1494 Data temp(*this); // to get around the fact that you can't resolve a const Data
1495 temp.resolve();
1496 return temp.maxval();
1497 }
1498 //
1499 // set the initial maximum value to min possible double
1500 FMax fmax_func;
1501 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1502 }
1503
1504 Data
1505 Data::minval() const
1506 {
1507 if (isLazy())
1508 {
1509 Data temp(*this); // to get around the fact that you can't resolve a const Data
1510 temp.resolve();
1511 return temp.minval();
1512 }
1513 //
1514 // set the initial minimum value to max possible double
1515 FMin fmin_func;
1516 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1517 }
1518
1519 Data
1520 Data::swapaxes(const int axis0, const int axis1) const
1521 {
1522 if (isLazy())
1523 {
1524 Data temp(*this);
1525 temp.resolve();
1526 return temp.swapaxes(axis0,axis1);
1527 }
1528 int axis0_tmp,axis1_tmp;
1529 DataTypes::ShapeType s=getDataPointShape();
1530 DataTypes::ShapeType ev_shape;
1531 // Build the output shape ev_shape by copying the data point shape
1532 // with the entries at axis0 and axis1 swapped.
1533 int rank=getDataPointRank();
1534 if (rank<2) {
1535 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1536 }
1537 if (axis0<0 || axis0>rank-1) {
1538 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1539 }
1540 if (axis1<0 || axis1>rank-1) {
1541 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1542 }
1543 if (axis0 == axis1) {
1544 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1545 }
1546 if (axis0 > axis1) {
1547 axis0_tmp=axis1;
1548 axis1_tmp=axis0;
1549 } else {
1550 axis0_tmp=axis0;
1551 axis1_tmp=axis1;
1552 }
1553 for (int i=0; i<rank; i++) {
1554 if (i == axis0_tmp) {
1555 ev_shape.push_back(s[axis1_tmp]);
1556 } else if (i == axis1_tmp) {
1557 ev_shape.push_back(s[axis0_tmp]);
1558 } else {
1559 ev_shape.push_back(s[i]);
1560 }
1561 }
1562 Data ev(0.,ev_shape,getFunctionSpace());
1563 ev.typeMatchRight(*this);
1564 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1565 return ev;
1566
1567 }
1568
1569 Data
1570 Data::symmetric() const
1571 {
1572 // check input
1573 DataTypes::ShapeType s=getDataPointShape();
1574 if (getDataPointRank()==2) {
1575 if(s[0] != s[1])
1576 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1577 }
1578 else if (getDataPointRank()==4) {
1579 if(!(s[0] == s[2] && s[1] == s[3]))
1580 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1581 }
1582 else {
1583 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1584 }
1585 MAKELAZYOP(SYM)
1586 Data ev(0.,getDataPointShape(),getFunctionSpace());
1587 ev.typeMatchRight(*this);
1588 m_data->symmetric(ev.m_data.get());
1589 return ev;
1590 }
1591
1592 Data
1593 Data::nonsymmetric() const
1594 {
1595 MAKELAZYOP(NSYM)
1596 // check input
1597 DataTypes::ShapeType s=getDataPointShape();
1598 if (getDataPointRank()==2) {
1599 if(s[0] != s[1])
1600 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1601 DataTypes::ShapeType ev_shape;
1602 ev_shape.push_back(s[0]);
1603 ev_shape.push_back(s[1]);
1604 Data ev(0.,ev_shape,getFunctionSpace());
1605 ev.typeMatchRight(*this);
1606 m_data->nonsymmetric(ev.m_data.get());
1607 return ev;
1608 }
1609 else if (getDataPointRank()==4) {
1610 if(!(s[0] == s[2] && s[1] == s[3]))
1611 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1612 DataTypes::ShapeType ev_shape;
1613 ev_shape.push_back(s[0]);
1614 ev_shape.push_back(s[1]);
1615 ev_shape.push_back(s[2]);
1616 ev_shape.push_back(s[3]);
1617 Data ev(0.,ev_shape,getFunctionSpace());
1618 ev.typeMatchRight(*this);
1619 m_data->nonsymmetric(ev.m_data.get());
1620 return ev;
1621 }
1622 else {
1623 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1624 }
1625 }
1626
1627 Data
1628 Data::trace(int axis_offset) const
1629 {
1630 MAKELAZYOPOFF(TRACE,axis_offset)
1631 DataTypes::ShapeType s=getDataPointShape();
1632 if (getDataPointRank()==2) {
1633 DataTypes::ShapeType ev_shape;
1634 Data ev(0.,ev_shape,getFunctionSpace());
1635 ev.typeMatchRight(*this);
1636 m_data->trace(ev.m_data.get(), axis_offset);
1637 return ev;
1638 }
1639 if (getDataPointRank()==3) {
1640 DataTypes::ShapeType ev_shape;
1641 if (axis_offset==0) {
1642 int s2=s[2];
1643 ev_shape.push_back(s2);
1644 }
1645 else if (axis_offset==1) {
1646 int s0=s[0];
1647 ev_shape.push_back(s0);
1648 }
1649 Data ev(0.,ev_shape,getFunctionSpace());
1650 ev.typeMatchRight(*this);
1651 m_data->trace(ev.m_data.get(), axis_offset);
1652 return ev;
1653 }
1654 if (getDataPointRank()==4) {
1655 DataTypes::ShapeType ev_shape;
1656 if (axis_offset==0) {
1657 ev_shape.push_back(s[2]);
1658 ev_shape.push_back(s[3]);
1659 }
1660 else if (axis_offset==1) {
1661 ev_shape.push_back(s[0]);
1662 ev_shape.push_back(s[3]);
1663 }
1664 else if (axis_offset==2) {
1665 ev_shape.push_back(s[0]);
1666 ev_shape.push_back(s[1]);
1667 }
1668 Data ev(0.,ev_shape,getFunctionSpace());
1669 ev.typeMatchRight(*this);
1670 m_data->trace(ev.m_data.get(), axis_offset);
1671 return ev;
1672 }
1673 else {
1674 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1675 }
1676 }
1677
1678 Data
1679 Data::transpose(int axis_offset) const
1680 {
1681 MAKELAZYOPOFF(TRANS,axis_offset)
1682 DataTypes::ShapeType s=getDataPointShape();
1683 DataTypes::ShapeType ev_shape;
1684 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset],
1685 // which goes through all shape vector elements starting with axis_offset (wrapping around to index 0 at index=rank)
1686 int rank=getDataPointRank();
1687 if (axis_offset<0 || axis_offset>rank) {
1688 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1689 }
1690 for (int i=0; i<rank; i++) {
1691 int index = (axis_offset+i)%rank;
1692 ev_shape.push_back(s[index]); // Append to new shape
1693 }
1694 Data ev(0.,ev_shape,getFunctionSpace());
1695 ev.typeMatchRight(*this);
1696 m_data->transpose(ev.m_data.get(), axis_offset);
1697 return ev;
1698 }
1699
1700 Data
1701 Data::eigenvalues() const
1702 {
1703 if (isLazy())
1704 {
1705 Data temp(*this); // to get around the fact that you can't resolve a const Data
1706 temp.resolve();
1707 return temp.eigenvalues();
1708 }
1709 // check input
1710 DataTypes::ShapeType s=getDataPointShape();
1711 if (getDataPointRank()!=2)
1712 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1713 if(s[0] != s[1])
1714 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1715 // create return
1716 DataTypes::ShapeType ev_shape(1,s[0]);
1717 Data ev(0.,ev_shape,getFunctionSpace());
1718 ev.typeMatchRight(*this);
1719 m_data->eigenvalues(ev.m_data.get());
1720 return ev;
1721 }
1722
1723 const boost::python::tuple
1724 Data::eigenvalues_and_eigenvectors(const double tol) const
1725 {
1726 if (isLazy())
1727 {
1728 Data temp(*this); // to get around the fact that you can't resolve a const Data
1729 temp.resolve();
1730 return temp.eigenvalues_and_eigenvectors(tol);
1731 }
1732 DataTypes::ShapeType s=getDataPointShape();
1733 if (getDataPointRank()!=2)
1734 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1735 if(s[0] != s[1])
1736 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1737 // create return
1738 DataTypes::ShapeType ev_shape(1,s[0]);
1739 Data ev(0.,ev_shape,getFunctionSpace());
1740 ev.typeMatchRight(*this);
1741 DataTypes::ShapeType V_shape(2,s[0]);
1742 Data V(0.,V_shape,getFunctionSpace());
1743 V.typeMatchRight(*this);
1744 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1745 return make_tuple(boost::python::object(ev),boost::python::object(V));
1746 }
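// Usage sketch (assuming the Python binding exposes the method under the
// same name):
//
//   val, vec = d.eigenvalues_and_eigenvectors()
//
// where val is a rank-1 Data object of shape (s0,) holding the eigenvalues
// and vec is a rank-2 Data object of shape (s0,s0) holding the eigenvectors,
// matching the (ev, V) pair assembled above.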
1747
1748 const boost::python::tuple
1749 Data::minGlobalDataPoint() const
1750 {
1751 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() because boost::make_tuple
1752 // causes an abort (for unknown reasons) if there are OpenMP directives with it in the
1753 // surrounding function
1754
1755 int DataPointNo;
1756 int ProcNo;
1757 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1758 return make_tuple(ProcNo,DataPointNo);
1759 }
1760
1761 void
1762 Data::calc_minGlobalDataPoint(int& ProcNo,
1763 int& DataPointNo) const
1764 {
1765 if (isLazy())
1766 {
1767 Data temp(*this); // to get around the fact that you can't resolve a const Data
1768 temp.resolve();
1769 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1770 }
1771 int i,j;
1772 int lowi=0,lowj=0;
1773 double min=numeric_limits<double>::max();
1774
1775 Data temp=minval();
1776
1777 int numSamples=temp.getNumSamples();
1778 int numDPPSample=temp.getNumDataPointsPerSample();
1779
1780 double next,local_min;
1781 int local_lowi=0,local_lowj=0;
1782
1783 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1784 {
1785 local_min=min;
1786 #pragma omp for private(i,j) schedule(static)
1787 for (i=0; i<numSamples; i++) {
1788 for (j=0; j<numDPPSample; j++) {
1789 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1790 if (next<local_min) {
1791 local_min=next;
1792 local_lowi=i;
1793 local_lowj=j;
1794 }
1795 }
1796 }
1797 #pragma omp critical
1798 if (local_min<min) {
1799 min=local_min;
1800 lowi=local_lowi;
1801 lowj=local_lowj;
1802 }
1803 }
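// The parallel region above is a manual min-reduction: each thread tracks its
// own local_min/local_lowi/local_lowj, and the critical section merges the
// per-thread results into min/lowi/lowj exactly once per thread.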
1804
1805 #ifdef PASO_MPI
1806 // determine the processor on which the minimum occurs
1807 next = temp.getDataPoint(lowi,lowj);
1808 int lowProc = 0;
1809 double *globalMins = new double[get_MPISize()+1];
1810 int error;
1811 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1812
1813 if( get_MPIRank()==0 ){
1814 next = globalMins[lowProc];
1815 for( i=1; i<get_MPISize(); i++ )
1816 if( next>globalMins[i] ){
1817 lowProc = i;
1818 next = globalMins[i];
1819 }
1820 }
1821 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );
1822
1823 delete [] globalMins;
1824 ProcNo = lowProc;
1825 #else
1826 ProcNo = 0;
1827 #endif
1828 DataPointNo = lowj + lowi * numDPPSample;
1829 }
1830
1831 void
1832 Data::saveDX(std::string fileName) const
1833 {
1834 if (isEmpty())
1835 {
1836 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1837 }
1838 if (isLazy())
1839 {
1840 Data temp(*this); // to get around the fact that you can't resolve a const Data
1841 temp.resolve();
1842 temp.saveDX(fileName);
1843 return;
1844 }
1845 boost::python::dict args;
1846 args["data"]=boost::python::object(this);
1847 getDomain()->saveDX(fileName,args);
1848 return;
1849 }
1850
1851 void
1852 Data::saveVTK(std::string fileName) const
1853 {
1854 if (isEmpty())
1855 {
1856 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1857 }
1858 if (isLazy())
1859 {
1860 Data temp(*this); // to get around the fact that you can't resolve a const Data
1861 temp.resolve();
1862 temp.saveVTK(fileName);
1863 return;
1864 }
1865 boost::python::dict args;
1866 args["data"]=boost::python::object(this);
1867 getDomain()->saveVTK(fileName,args);
1868 return;
1869 }
1870
1871 Data&
1872 Data::operator+=(const Data& right)
1873 {
1874 if (isProtected()) {
1875 throw DataException("Error - attempt to update protected Data object.");
1876 }
1877 MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
1878 binaryOp(right,plus<double>());
1879 return (*this);
1880 }
1881
1882 Data&
1883 Data::operator+=(const boost::python::object& right)
1884 {
1885 if (isProtected()) {
1886 throw DataException("Error - attempt to update protected Data object.");
1887 }
1888 Data tmp(right,getFunctionSpace(),false);
1889 MAKELAZYBINSELF(tmp,ADD)
1890 binaryOp(tmp,plus<double>());
1891 return (*this);
1892 }
1893
1894 // Hmmm, operator= makes a deep copy but the copy constructor does not?
1895 Data&
1896 Data::operator=(const Data& other)
1897 {
1898 copy(other);
1899 return (*this);
1900 }
1901
1902 Data&
1903 Data::operator-=(const Data& right)
1904 {
1905 if (isProtected()) {
1906 throw DataException("Error - attempt to update protected Data object.");
1907 }
1908 MAKELAZYBINSELF(right,SUB)
1909 binaryOp(right,minus<double>());
1910 return (*this);
1911 }
1912
1913 Data&
1914 Data::operator-=(const boost::python::object& right)
1915 {
1916 if (isProtected()) {
1917 throw DataException("Error - attempt to update protected Data object.");
1918 }
1919 Data tmp(right,getFunctionSpace(),false);
1920 MAKELAZYBINSELF(tmp,SUB)
1921 binaryOp(tmp,minus<double>());
1922 return (*this);
1923 }
1924
1925 Data&
1926 Data::operator*=(const Data& right)
1927 {
1928 if (isProtected()) {
1929 throw DataException("Error - attempt to update protected Data object.");
1930 }
1931 MAKELAZYBINSELF(right,MUL)
1932 binaryOp(right,multiplies<double>());
1933 return (*this);
1934 }
1935
1936 Data&
1937 Data::operator*=(const boost::python::object& right)
1938 {
1939 if (isProtected()) {
1940 throw DataException("Error - attempt to update protected Data object.");
1941 }
1942 Data tmp(right,getFunctionSpace(),false);
1943 MAKELAZYBINSELF(tmp,MUL)
1944 binaryOp(tmp,multiplies<double>());
1945 return (*this);
1946 }
1947
1948 Data&
1949 Data::operator/=(const Data& right)
1950 {
1951 if (isProtected()) {
1952 throw DataException("Error - attempt to update protected Data object.");
1953 }
1954 MAKELAZYBINSELF(right,DIV)
1955 binaryOp(right,divides<double>());
1956 return (*this);
1957 }
1958
1959 Data&
1960 Data::operator/=(const boost::python::object& right)
1961 {
1962 if (isProtected()) {
1963 throw DataException("Error - attempt to update protected Data object.");
1964 }
1965 Data tmp(right,getFunctionSpace(),false);
1966 MAKELAZYBINSELF(tmp,DIV)
1967 binaryOp(tmp,divides<double>());
1968 return (*this);
1969 }
1970
1971 Data
1972 Data::rpowO(const boost::python::object& left) const
1973 {
1974 Data left_d(left,*this);
1975 return left_d.powD(*this);
1976 }
1977
1978 Data
1979 Data::powO(const boost::python::object& right) const
1980 {
1981 Data tmp(right,getFunctionSpace(),false);
1982 return powD(tmp);
1983 }
1984
1985 Data
1986 Data::powD(const Data& right) const
1987 {
1988 MAKELAZYBIN(right,POW)
1989 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
1990 }
1991
1992 //
1993 // NOTE: It is essential to specify the namespace this operator belongs to
1994 Data
1995 escript::operator+(const Data& left, const Data& right)
1996 {
1997 MAKELAZYBIN2(left,right,ADD)
1998 return C_TensorBinaryOperation(left, right, plus<double>());
1999 }
2000
2001 //
2002 // NOTE: It is essential to specify the namespace this operator belongs to
2003 Data
2004 escript::operator-(const Data& left, const Data& right)
2005 {
2006 MAKELAZYBIN2(left,right,SUB)
2007 return C_TensorBinaryOperation(left, right, minus<double>());
2008 }
2009
2010 //
2011 // NOTE: It is essential to specify the namespace this operator belongs to
2012 Data
2013 escript::operator*(const Data& left, const Data& right)
2014 {
2015 MAKELAZYBIN2(left,right,MUL)
2016 return C_TensorBinaryOperation(left, right, multiplies<double>());
2017 }
2018
2019 //
2020 // NOTE: It is essential to specify the namespace this operator belongs to
2021 Data
2022 escript::operator/(const Data& left, const Data& right)
2023 {
2024 MAKELAZYBIN2(left,right,DIV)
2025 return C_TensorBinaryOperation(left, right, divides<double>());
2026 }
2027
2028 //
2029 // NOTE: It is essential to specify the namespace this operator belongs to
2030 Data
2031 escript::operator+(const Data& left, const boost::python::object& right)
2032 {
2033 Data tmp(right,left.getFunctionSpace(),false);
2034 MAKELAZYBIN2(left,tmp,ADD)
2035 return left+tmp;
2036 }
2037
2038 //
2039 // NOTE: It is essential to specify the namespace this operator belongs to
2040 Data
2041 escript::operator-(const Data& left, const boost::python::object& right)
2042 {
2043 Data tmp(right,left.getFunctionSpace(),false);
2044 MAKELAZYBIN2(left,tmp,SUB)
2045 return left-tmp;
2046 }
2047
2048 //
2049 // NOTE: It is essential to specify the namespace this operator belongs to
2050 Data
2051 escript::operator*(const Data& left, const boost::python::object& right)
2052 {
2053 Data tmp(right,left.getFunctionSpace(),false);
2054 MAKELAZYBIN2(left,tmp,MUL)
2055 return left*tmp;
2056 }
2057
2058 //
2059 // NOTE: It is essential to specify the namespace this operator belongs to
2060 Data
2061 escript::operator/(const Data& left, const boost::python::object& right)
2062 {
2063 Data tmp(right,left.getFunctionSpace(),false);
2064 MAKELAZYBIN2(left,tmp,DIV)
2065 return left/tmp;
2066 }
2067
2068 //
2069 // NOTE: It is essential to specify the namespace this operator belongs to
2070 Data
2071 escript::operator+(const boost::python::object& left, const Data& right)
2072 {
2073 Data tmp(left,right.getFunctionSpace(),false);
2074 MAKELAZYBIN2(tmp,right,ADD)
2075 return tmp+right;
2076 }
2077
2078 //
2079 // NOTE: It is essential to specify the namespace this operator belongs to
2080 Data
2081 escript::operator-(const boost::python::object& left, const Data& right)
2082 {
2083 Data tmp(left,right.getFunctionSpace(),false);
2084 MAKELAZYBIN2(tmp,right,SUB)
2085 return tmp-right;
2086 }
2087
2088 //
2089 // NOTE: It is essential to specify the namespace this operator belongs to
2090 Data
2091 escript::operator*(const boost::python::object& left, const Data& right)
2092 {
2093 Data tmp(left,right.getFunctionSpace(),false);
2094 MAKELAZYBIN2(tmp,right,MUL)
2095 return tmp*right;
2096 }
2097
2098 //
2099 // NOTE: It is essential to specify the namespace this operator belongs to
2100 Data
2101 escript::operator/(const boost::python::object& left, const Data& right)
2102 {
2103 Data tmp(left,right.getFunctionSpace(),false);
2104 MAKELAZYBIN2(tmp,right,DIV)
2105 return tmp/right;
2106 }
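// Illustrative note (a sketch, not part of the original source): every mixed
// Data/python::object operator above follows the same pattern - wrap the
// Python operand in a temporary Data on the Data argument's function space,
// let MAKELAZYBIN2 short-circuit to a lazy result where applicable, and
// otherwise fall through to the pure Data operators, which call
// C_TensorBinaryOperation. Expected use from C++ (names hypothetical):
//   Data u = ...;                       // some existing Data object
//   boost::python::object two(2.0);
//   Data v = u + two;                   // operator+(const Data&, const object&)
//   Data w = two * u;                   // operator*(const object&, const Data&)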
2107
2108
2109 /* TODO */
2110 /* global reduction */
2111 Data
2112 Data::getItem(const boost::python::object& key) const
2113 {
2114
2115 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2116
2117 if (slice_region.size()!=getDataPointRank()) {
2118 throw DataException("Error - slice size does not match Data rank.");
2119 }
2120
2121 return getSlice(slice_region);
2122 }
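// Illustrative note (an assumption about the Python binding, not stated in
// this file): getItem/getSlice back Python indexing on Data objects, so an
// expression such as d[0] or d[0:2] would arrive here as `key`;
// DataTypes::getSliceRegion converts it into one range entry per rank, the
// rank check above rejects keys of the wrong dimensionality, and getSlice
// then copies the selected region into a new Data object.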
2123
2124 /* TODO */
2125 /* global reduction */
2126 Data
2127 Data::getSlice(const DataTypes::RegionType& region) const
2128 {
2129 return Data(*this,region);
2130 }
2131
2132 /* TODO */
2133 /* global reduction */
2134 void
2135 Data::setItemO(const boost::python::object& key,
2136 const boost::python::object& value)
2137 {
2138 Data tempData(value,getFunctionSpace());
2139 setItemD(key,tempData);
2140 }
2141
2142 void
2143 Data::setItemD(const boost::python::object& key,
2144 const Data& value)
2145 {
2146 // const DataArrayView& view=getPointDataView();
2147
2148 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2149 if (slice_region.size()!=getDataPointRank()) {
2150 throw DataException("Error - slice size does not match Data rank.");
2151 }
2152 if (getFunctionSpace()!=value.getFunctionSpace()) {
2153 setSlice(Data(value,getFunctionSpace()),slice_region);
2154 } else {
2155 setSlice(value,slice_region);
2156 }
2157 }
2158
2159 void
2160 Data::setSlice(const Data& value,
2161 const DataTypes::RegionType& region)
2162 {
2163 if (isProtected()) {
2164 throw DataException("Error - attempt to update protected Data object.");
2165 }
2166 FORCERESOLVE;
2167 /* if (isLazy())
2168 {
2169 throw DataException("Error - setSlice not permitted on lazy data.");
2170 }*/
2171 Data tempValue(value);
2172 typeMatchLeft(tempValue);
2173 typeMatchRight(tempValue);
2174 getReady()->setSlice(tempValue.m_data.get(),region);
2175 }
2176
2177 void
2178 Data::typeMatchLeft(Data& right) const
2179 {
2180 if (right.isLazy() && !isLazy())
2181 {
2182 right.resolve();
2183 }
2184 if (isExpanded()){
2185 right.expand();
2186 } else if (isTagged()) {
2187 if (right.isConstant()) {
2188 right.tag();
2189 }
2190 }
2191 }
2192
2193 void
2194 Data::typeMatchRight(const Data& right)
2195 {
2196 if (isLazy() && !right.isLazy())
2197 {
2198 resolve();
2199 }
2200 if (isTagged()) {
2201 if (right.isExpanded()) {
2202 expand();
2203 }
2204 } else if (isConstant()) {
2205 if (right.isExpanded()) {
2206 expand();
2207 } else if (right.isTagged()) {
2208 tag();
2209 }
2210 }
2211 }
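// Descriptive summary (added comment, not in the original source): the two
// typeMatch methods implement the usual promotion order
//   DataConstant < DataTagged < DataExpanded.
// typeMatchLeft() promotes `right` up to at least this object's
// representation (expanding it, or tagging a constant), while
// typeMatchRight() promotes *this* up to at least `right`'s; in both cases
// the side being promoted is resolved first if it is lazy and the other
// side is not, so that both operands end up "ready".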
2212
2213 void
2214 Data::setTaggedValueByName(std::string name,
2215 const boost::python::object& value)
2216 {
2217 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2218 FORCERESOLVE;
2219 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2220 setTaggedValue(tagKey,value);
2221 }
2222 }
2223 void
2224 Data::setTaggedValue(int tagKey,
2225 const boost::python::object& value)
2226 {
2227 if (isProtected()) {
2228 throw DataException("Error - attempt to update protected Data object.");
2229 }
2230 //
2231 // Ensure underlying data object is of type DataTagged
2232 FORCERESOLVE;
2233 if (isConstant()) tag();
2234 numeric::array asNumArray(value);
2235
2236 // extract the shape of the numarray
2237 DataTypes::ShapeType tempShape;
2238 for (int i=0; i < asNumArray.getrank(); i++) {
2239 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2240 }
2241
2242 DataVector temp_data2;
2243 temp_data2.copyFromNumArray(asNumArray,1);
2244
2245 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2246 }
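// Illustrative note (an assumption about the Python-side call, not stated in
// this file): from Python this is reached as something like
//   d.setTaggedValue(tagKey, value)
// where `value` is a numarray (or an object convertible to one) whose shape
// is expected to match the data point shape; the code above copies its shape
// and contents into a DataVector and hands both to the underlying DataTagged
// object.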
2247
2248
2249 void
2250 Data::setTaggedValueFromCPP(int tagKey,
2251 const DataTypes::ShapeType& pointshape,
2252 const DataTypes::ValueType& value,
2253 int dataOffset)
2254 {
2255 if (isProtected()) {
2256 throw DataException("Error - attempt to update protected Data object.");
2257 }
2258 //
2259 // Ensure underlying data object is of type DataTagged
2260 FORCERESOLVE;
2261 if (isConstant()) tag();
2262 //
2263 // Call DataAbstract::setTaggedValue
2264 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2265 }
2266
2267 int
2268 Data::getTagNumber(int dpno)
2269 {
2270 if (isEmpty())
2271 {
2272 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2273 }
2274 return getFunctionSpace().getTagFromDataPointNo(dpno);
2275 }
2276
2277
2278 ostream& escript::operator<<(ostream& o, const Data& data)
2279 {
2280 o << data.toString();
2281 return o;
2282 }
2283
2284 Data
2285 escript::C_GeneralTensorProduct(Data& arg_0,
2286 Data& arg_1,
2287 int axis_offset,
2288 int transpose)
2289 {
2290 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2291 // SM is the product of the last axis_offset entries in arg_0.getShape().
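// Worked example (illustrative only, not part of the original source): with
// arg_0 of shape (2,3), arg_1 of shape (3,4), axis_offset=1 and transpose=0,
// the loops below give SL=2, SM=3, SR=4 and a result of shape (2,4), i.e. an
// ordinary matrix-matrix product; a larger axis_offset contracts that many
// trailing axes of arg_0 against the leading axes of arg_1 in the same way.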
2292
2293 // deal with any lazy data
2294 // if (arg_0.isLazy()) {arg_0.resolve();}
2295 // if (arg_1.isLazy()) {arg_1.resolve();}
2296 if (arg_0.isLazy() || arg_1.isLazy())
2297 {
2298 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2299 return Data(c);
2300 }
2301
2302 // Interpolate if necessary and find an appropriate function space
2303 Data arg_0_Z, arg_1_Z;
2304 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2305 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2306 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2307 arg_1_Z = Data(arg_1);
2308 }
2309 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2310 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2311 arg_0_Z =Data(arg_0);
2312 }
2313 else {
2314 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2315 }
2316 } else {
2317 arg_0_Z = Data(arg_0);
2318 arg_1_Z = Data(arg_1);
2319 }
2320 // Get rank and shape of inputs
2321 int rank0 = arg_0_Z.getDataPointRank();
2322 int rank1 = arg_1_Z.getDataPointRank();
2323 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2324 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2325
2326 // Prepare for the loops of the product and verify compatibility of shapes
2327 int start0=0, start1=0;
2328 if (transpose == 0) {}
2329 else if (transpose == 1) { start0 = axis_offset; }
2330 else if (transpose == 2) { start1 = rank1-axis_offset; }
2331 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
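// Illustrative note (added comment): with rank0=2 and axis_offset=1,
// transpose==1 sets start0=1 so that tmpShape0 below becomes shape0 rotated
// by one place, i.e. arg_0 enters the product transposed; transpose==2 does
// the analogous rotation for arg_1 via start1=rank1-axis_offset, and
// transpose==0 leaves both shapes unchanged.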
2332
2333
2334 // Adjust the shapes for transpose
2335 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2336 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2337 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2338 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
2339
2340 #if 0
2341 // For debugging: show shape after transpose
2342 char tmp[100];
2343 std::string shapeStr;
2344 shapeStr = "(";
2345 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2346 shapeStr += ")";
2347 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2348 shapeStr = "(";
2349 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2350 shapeStr += ")";
2351 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2352 #endif
2353
2354 // Prepare for the loops of the product
2355 int SL=1, SM=1, SR=1;
2356 for (int i=0; i<rank0-axis_offset; i++) {
2357 SL *= tmpShape0[i];
2358 }
2359 for (int i=rank0-axis_offset; i<rank0; i++) {
2360 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2361 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2362 }
2363 SM *= tmpShape0[i];
2364 }
2365 for (int i=axis_offset; i<rank1; i++) {
2366 SR *= tmpShape1[i];
2367 }
2368
2369 // Define the shape of the output (its rank is the sum of the two loop ranges below)
2370 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2371 { // block to limit the scope of out_index
2372 int out_index=0;
2373 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2374 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2375 }
2376
2377 if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
2378 {
2379 ostringstream os;
2380 os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
2381 throw DataException(os.str());
2382 }
2383
2384 // Declare output Data object
2385 Data res;
2386
2387 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2388 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2389 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2390 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2391 double *ptr_2 = &(res.getDataAtOffset(0));
2392 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2393 }
2394 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2395
2396 // Prepare the DataConstant input
2397 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2398 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2399
2400 // Borrow DataTagged input from Data object
2401 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2402 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2403
2404 // Prepare a DataTagged output (tmp_2)
2405 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2406 res.tag();
2407 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2408 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2409
2410 // Prepare offset into DataConstant
2411 int offset_0 = tmp_0->getPointOffset(0,0);
2412 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2413 // Get the views
2414 // DataArrayView view_1 = tmp_1->getDefaultValue();
2415 // DataArrayView view_2 = tmp_2->getDefaultValue();
2416 // // Get the pointers to the actual data
2417 // double *ptr_1 = &((view_1.getData())[0]);
2418 // double *ptr_2 = &((view_2.getData())[0]);
2419
2420 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2421 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2422
2423
2424 // Compute a matrix product for the default value
2425 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2426 // Compute a matrix product for each tag
2427 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2428 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2429 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2430 tmp_2->addTag(i->first);
2431 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2432 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2433 // double *ptr_1 = &view_1.getData(0);
2434 // double *ptr_2 = &view_2.getData(0);
2435
2436 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2437 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2438
2439 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2440 }
2441
2442 }
2443 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2444
2445 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2446 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2447 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2448 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2449 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2450 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2451 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2452 int sampleNo_1,dataPointNo_1;
2453 int numSamples_1 = arg_1_Z.getNumSamples();
2454 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2455 int offset_0 = tmp_0->getPointOffset(0,0);
2456 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2457 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2458 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2459 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2460 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2461 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2462 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2463 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2464 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2465 }
2466 }
2467
2468 }
2469 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2470
2471 // Borrow DataTagged input from Data object
2472 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2473 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2474
2475 // Prepare the DataConstant input
2476 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2477 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2478
2479 // Prepare a DataTagged output (tmp_2)
2480 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2481 res.tag();
2482 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2483 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2484
2485 // Prepare offset into DataConstant
2486 int offset_1 = tmp_1->getPointOffset(0,0);
2487 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2488 // Get the views
2489 // DataArrayView view_0 = tmp_0->getDefaultValue();
2490 // DataArrayView view_2 = tmp_2->getDefaultValue();
2491 // // Get the pointers to the actual data
2492 // double *ptr_0 = &((view_0.getData())[0]);
2493 // double *ptr_2 = &((view_2.getData())[0]);
2494
2495 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2496 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2497
2498 // Compute a matrix product for the default value
2499 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2500 // Compute a matrix product for each tag
2501 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2502 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2503 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2504 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2505 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2506 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2507 // double *ptr_0 = &view_0.getData(0);
2508 // double *ptr_2 = &view_2.getData(0);
2509
2510 tmp_2->addTag(i->first);
2511 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2512 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2513 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2514 }
2515
2516 }
2517 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2518
2519 // Borrow DataTagged input from Data object
2520 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2521 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2522
2523 // Borrow DataTagged input from Data object
2524 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2525 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2526
2527 // Prepare a DataTagged output (tmp_2)
2528 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2529 res.tag(); // DataTagged output
2530 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2531 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2532
2533 // // Get the views
2534 // DataArrayView view_0 = tmp_0->getDefaultValue();
2535 // DataArrayView view_1 = tmp_1->getDefaultValue();
2536 // DataArrayView view_2 = tmp_2->getDefaultValue();
2537 // // Get the pointers to the actual data
2538 // double *ptr_0 = &((view_0.getData())[0]);
2539 // double *ptr_1 = &((view_1.getData())[0]);
2540 // double *ptr_2 = &((view_2.getData())[0]);
2541
2542 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2543 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2544 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2545
2546
2547 // Compute a matrix product for the default value
2548 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2549 // Merge the tags
2550 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2551 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2552 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2553 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2554 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2555 }
2556 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2557 tmp_2->addTag(i->first);
2558 }
2559 // Compute a matrix product for each tag
2560 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2561 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2562 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2563 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2564 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2565 // double *ptr_0 = &view_0.getData(0);
2566 // double *ptr_1 = &view_1.getData(0);
2567 // double *ptr_2 = &view_2.getData(0);
2568
2569 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2570 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2571 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2572
2573 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2574 }
2575
2576 }
2577 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2578
2579 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2580 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2581 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2582 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2583 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2584 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2585 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2586 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2587 int sampleNo_0,dataPointNo_0;
2588 int numSamples_0 = arg_0_Z.getNumSamples();
2589 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2590 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2591 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2592 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2593 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2594 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2595 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2596 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2597 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2598 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2599 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2600 }
2601 }
2602
2603 }
2604 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2605
2606 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2607 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2608 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2609 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2610 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2611 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2612 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2613 int sampleNo_0,dataPointNo_0;
2614 int numSamples_0 = arg_0_Z.getNumSamples();
2615 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2616 int offset_1 = tmp_1->getPointOffset(0,0);
2617 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2618 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2619 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2620 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2621 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2622 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2623 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2624 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2625 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2626 }
2627 }
2628
2629
2630 }
2631 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2632
2633 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2634 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2635 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2636 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2637 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2638 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2639 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2640 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2641 int sampleNo_0,dataPointNo_0;
2642 int numSamples_0 = arg_0_Z.getNumSamples();
2643 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2644 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2645 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2646 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2647 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2648 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2649 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2650 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2651 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2652 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2653 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2654 }
2655 }
2656
2657 }
2658 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2659
2660 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2661 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2662 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2663 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2664 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2665 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2666 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2667 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2668 int sampleNo_0,dataPointNo_0;
2669 int numSamples_0 = arg_0_Z.getNumSamples();
2670 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2671 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2672 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2673 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2674 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2675 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2676 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2677 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2678 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2679 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2680 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2681 }
2682 }
2683
2684 }
2685 else {
2686 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2687 }
2688
2689 return res;
2690 }
2691
2692 DataAbstract*
2693 Data::borrowData() const
2694 {
2695 return m_data.get();
2696 }
2697
2698 // Note: this returns a non-const pointer from a const method, which is not ideal.
2699 DataAbstract_ptr
2700 Data::borrowDataPtr() const
2701 {
2702 return m_data;
2703 }
2704
2705 // Note: this returns a non-const pointer from a const method, which is not ideal.
2706 DataReady_ptr
2707 Data::borrowReadyPtr() const
2708 {
2709 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2710 EsysAssert((dr!=0), "Error - casting to DataReady.");
2711 return dr;
2712 }
2713
2714 std::string
2715 Data::toString() const
2716 {
2717 if (!m_data->isEmpty() &&
2718 !m_data->isLazy() &&
2719 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2720 {
2721 stringstream temp;
2722 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2723 return temp.str();
2724 }
2725 return m_data->toString();
2726 }
2727
2728
2729
2730 DataTypes::ValueType::const_reference
2731 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2732 {
2733 if (isLazy())
2734 {
2735 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2736 }
2737 return getReady()->getDataAtOffset(i);
2738 }
2739
2740
2741 DataTypes::ValueType::reference
2742 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2743 {
2744 // if (isLazy())
2745 // {
2746 // throw DataException("getDataAtOffset not permitted on lazy data.");
2747 // }
2748 FORCERESOLVE;
2749 return getReady()->getDataAtOffset(i);
2750 }
2751
2752 DataTypes::ValueType::const_reference
2753 Data::getDataPoint(int sampleNo, int dataPointNo) const
2754 {
2755 if (!isReady())
2756 {
2757 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2758 }
2759 else
2760 {
2761 const DataReady* dr=getReady();
2762 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2763 }
2764 }
2765
2766
2767 DataTypes::ValueType::reference
2768 Data::getDataPoint(int sampleNo, int dataPointNo)
2769 {
2770 FORCERESOLVE;
2771 if (!isReady())
2772 {
2773 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2774 }
2775 else
2776 {
2777 DataReady* dr=getReady();
2778 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2779 }
2780 }
2781
2782
2783 /* Member functions specific to the MPI implementation */
2784
2785 void
2786 Data::print()
2787 {
2788 int i,j;
2789
2790 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2791 for( i=0; i<getNumSamples(); i++ )
2792 {
2793 printf( "[%6d]", i );
2794 for( j=0; j<getNumDataPointsPerSample(); j++ )
2795 printf( "\t%10.7g", (getSampleData(i))[j] );
2796 printf( "\n" );
2797 }
2798 }
2799 void
2800 Data::dump(const std::string fileName) const
2801 {
2802 try
2803 {
2804 if (isLazy())
2805 {
2806 Data temp(*this); // this is to get a non-const object which we can resolve
2807 temp.resolve();
2808 temp.dump(fileName);
2809 }
2810 else
2811 {
2812 return m_data->dump(fileName);
2813 }
2814 }
2815 catch (std::exception& e)
2816 {
2817 cout << e.what() << endl;
2818 }
2819 }
2820
2821 int
2822 Data::get_MPISize() const
2823 {
2824 int size;
2825 #ifdef PASO_MPI
2826 int error;
2827 error = MPI_Comm_size( get_MPIComm(), &size );
2828 #else
2829 size = 1;
2830 #endif
2831 return size;
2832 }
2833
2834 int
2835 Data::get_MPIRank() const
2836 {
2837 int rank;
2838 #ifdef PASO_MPI
2839 int error;
2840 error = MPI_Comm_rank( get_MPIComm(), &rank );
2841 #else
2842 rank = 0;
2843 #endif
2844 return rank;
2845 }
2846
2847 MPI_Comm
2848 Data::get_MPIComm() const
2849 {
2850 #ifdef PASO_MPI
2851 return MPI_COMM_WORLD;
2852 #else
2853 return -1;
2854 #endif
2855 }
2856
2857
