
Contents of /trunk/escript/src/Data.cpp



Revision 2200
Thu Jan 8 23:55:40 2009 UTC by jfenwick
File size: 81519 bytes
Fixed Python string substitution in error messages for trace and transpose.
Added tests for invalid axis offsets for trace on the C++ side.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41
42 using namespace std;
43 using namespace boost::python;
44 using namespace boost;
45 using namespace escript;
46
47 // ensure the current object is not a DataLazy
48 // The idea was that we could add an optional warning whenever a resolve is forced
49 #define FORCERESOLVE if (isLazy()) {resolve();}
50 #define AUTOLAZYON escriptParams.getAUTOLAZY()
51 #define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
52 {\
53 DataLazy* c=new DataLazy(borrowDataPtr(),X);\
54 return Data(c);\
55 }
56 #define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
57 {\
58 DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
59 return Data(c);\
60 }
61
62 #define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
63 {\
64 DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
65 m_data=c->getPtr();\
66 return (*this);\
67 }
68
69 // like the above but returns a new Data object rather than *this
70 #define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
71 {\
72 DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
73 return Data(c);\
74 }
75
76 #define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
77 {\
78 DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
79 return Data(c);\
80 }
81
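// For illustration: with these macros, a unary member such as Data::sin() effectively
// begins with
//   if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) {
//       return Data(new DataLazy(borrowDataPtr(), SIN));   // record the op, defer evaluation
//   }
// so the operation is captured in a DataLazy expression node instead of being applied
// immediately; a later resolve() evaluates the accumulated expression.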
82 Data::Data()
83 {
84 //
85 // Default data is type DataEmpty
86 DataAbstract* temp=new DataEmpty();
87 m_data=temp->getPtr();
88 m_protected=false;
89 }
90
91 Data::Data(double value,
92 const tuple& shape,
93 const FunctionSpace& what,
94 bool expanded)
95 {
96 DataTypes::ShapeType dataPointShape;
97 for (int i = 0; i < shape.attr("__len__")(); ++i) {
98 dataPointShape.push_back(extract<const int>(shape[i]));
99 }
100
101 int len = DataTypes::noValues(dataPointShape);
102 DataVector temp_data(len,value,len);
103 initialise(temp_data, dataPointShape, what, expanded);
104 m_protected=false;
105 }
106
107 Data::Data(double value,
108 const DataTypes::ShapeType& dataPointShape,
109 const FunctionSpace& what,
110 bool expanded)
111 {
112 int len = DataTypes::noValues(dataPointShape);
113
114 DataVector temp_data(len,value,len);
115 // DataArrayView temp_dataView(temp_data, dataPointShape);
116
117 // initialise(temp_dataView, what, expanded);
118 initialise(temp_data, dataPointShape, what, expanded);
119
120 m_protected=false;
121 }
122
123 Data::Data(const Data& inData)
124 {
125 m_data=inData.m_data;
126 m_protected=inData.isProtected();
127 }
128
129
130 Data::Data(const Data& inData,
131 const DataTypes::RegionType& region)
132 {
133 DataAbstract_ptr dat=inData.m_data;
134 if (inData.isLazy())
135 {
136 dat=inData.m_data->resolve();
137 }
138 else
139 {
140 dat=inData.m_data;
141 }
142 //
143 // Create Data which is a slice of another Data
144 DataAbstract* tmp = dat->getSlice(region);
145 m_data=DataAbstract_ptr(tmp);
146 m_protected=false;
147 }
148
149 Data::Data(const Data& inData,
150 const FunctionSpace& functionspace)
151 {
152 if (inData.isEmpty())
153 {
154 throw DataException("Error - will not interpolate for instances of DataEmpty.");
155 }
156 if (inData.getFunctionSpace()==functionspace) {
157 m_data=inData.m_data;
158 }
159 else
160 {
161
162 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
163 if (!inData.probeInterpolation(functionspace))
164 { // Even though this is constant, we still need to check whether interpolation is allowed
165 throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
166 }
167 // if the data is not lazy, this will just be a cast to DataReady
168 DataReady_ptr dr=inData.m_data->resolve();
169 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
170 m_data=DataAbstract_ptr(dc);
171 } else {
172 Data tmp(0,inData.getDataPointShape(),functionspace,true);
173 // Note: Must use a reference or pointer to a derived object
174 // in order to get polymorphic behaviour. Shouldn't really
175 // be able to create an instance of AbstractDomain but that was done
176 // as a boost::python workaround which may no longer be required.
177 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
178 const_Domain_ptr inDataDomain=inData.getDomain();
179 if (inDataDomain==functionspace.getDomain()) {
180 inDataDomain->interpolateOnDomain(tmp,inData);
181 } else {
182 inDataDomain->interpolateACross(tmp,inData);
183 }
184 m_data=tmp.m_data;
185 }
186 }
187 m_protected=false;
188 }
189
190 Data::Data(DataAbstract* underlyingdata)
191 {
192 // m_data=shared_ptr<DataAbstract>(underlyingdata);
193 m_data=underlyingdata->getPtr();
194 m_protected=false;
195 }
196
197 Data::Data(DataAbstract_ptr underlyingdata)
198 {
199 m_data=underlyingdata;
200 m_protected=false;
201 }
202
203
204 Data::Data(const numeric::array& value,
205 const FunctionSpace& what,
206 bool expanded)
207 {
208 initialise(value,what,expanded);
209 m_protected=false;
210 }
211
212 Data::Data(const DataTypes::ValueType& value,
213 const DataTypes::ShapeType& shape,
214 const FunctionSpace& what,
215 bool expanded)
216 {
217 initialise(value,shape,what,expanded);
218 m_protected=false;
219 }
220
221
222 Data::Data(const object& value,
223 const FunctionSpace& what,
224 bool expanded)
225 {
226 numeric::array asNumArray(value);
227 initialise(asNumArray,what,expanded);
228 m_protected=false;
229 }
230
231
232 Data::Data(const object& value,
233 const Data& other)
234 {
235 numeric::array asNumArray(value);
236
237 // extract the shape of the numarray
238 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
239 if (DataTypes::getRank(tempShape)==0) {
240
241
242 // get the space for the data vector
243 int len1 = DataTypes::noValues(tempShape);
244 DataVector temp_data(len1, 0.0, len1);
245 temp_data.copyFromNumArray(asNumArray,1);
246
247 int len = DataTypes::noValues(other.getDataPointShape());
248
249 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
250 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
251 m_data=DataAbstract_ptr(t);
252
253 } else {
254 //
255 // Create a DataConstant with the same sample shape as other
256 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
257 // boost::shared_ptr<DataAbstract> sp(t);
258 // m_data=sp;
259 m_data=DataAbstract_ptr(t);
260 }
261 m_protected=false;
262 }
263
264 Data::~Data()
265 {
266
267 }
268
269
270
271 void
272 Data::initialise(const boost::python::numeric::array& value,
273 const FunctionSpace& what,
274 bool expanded)
275 {
276 //
277 // Construct a Data object of the appropriate type.
278 // Construct the object first as there seems to be a bug which causes
279 // undefined behaviour if an exception is thrown during construction
280 // within the shared_ptr constructor.
281 if (expanded) {
282 DataAbstract* temp=new DataExpanded(value, what);
283 // boost::shared_ptr<DataAbstract> temp_data(temp);
284 // m_data=temp_data;
285 m_data=temp->getPtr();
286 } else {
287 DataAbstract* temp=new DataConstant(value, what);
288 // boost::shared_ptr<DataAbstract> temp_data(temp);
289 // m_data=temp_data;
290 m_data=temp->getPtr();
291 }
292 }
293
294
295 void
296 Data::initialise(const DataTypes::ValueType& value,
297 const DataTypes::ShapeType& shape,
298 const FunctionSpace& what,
299 bool expanded)
300 {
301 //
302 // Construct a Data object of the appropriate type.
303 // Construct the object first as there seems to be a bug which causes
304 // undefined behaviour if an exception is thrown during construction
305 // within the shared_ptr constructor.
306 if (expanded) {
307 DataAbstract* temp=new DataExpanded(what, shape, value);
308 // boost::shared_ptr<DataAbstract> temp_data(temp);
309 // m_data=temp_data;
310 m_data=temp->getPtr();
311 } else {
312 DataAbstract* temp=new DataConstant(what, shape, value);
313 // boost::shared_ptr<DataAbstract> temp_data(temp);
314 // m_data=temp_data;
315 m_data=temp->getPtr();
316 }
317 }
318
319
320 // void
321 // Data::CompareDebug(const Data& rd)
322 // {
323 // using namespace std;
324 // bool mismatch=false;
325 // std::cout << "Comparing left and right" << endl;
326 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
327 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
328 //
329 // if (left==0)
330 // {
331 // cout << "left arg is not a DataTagged\n";
332 // return;
333 // }
334 //
335 // if (right==0)
336 // {
337 // cout << "right arg is not a DataTagged\n";
338 // return;
339 // }
340 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
341 // cout << "Shapes ";
342 // if (left->getShape()==right->getShape())
343 // {
344 // cout << "ok\n";
345 // }
346 // else
347 // {
348 // cout << "Problem: shapes do not match\n";
349 // mismatch=true;
350 // }
351 // int lim=left->getVector().size();
352 // if (right->getVector().size()) lim=right->getVector().size();
353 // for (int i=0;i<lim;++i)
354 // {
355 // if (left->getVector()[i]!=right->getVector()[i])
356 // {
357 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
358 // mismatch=true;
359 // }
360 // }
361 //
362 // // still need to check the tag map
363 // // also need to watch what is happening to function spaces, are they copied or what?
364 //
365 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
366 // const DataTagged::DataMapType& mapright=right->getTagLookup();
367 //
368 // if (mapleft.size()!=mapright.size())
369 // {
370 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
371 // mismatch=true;
372 // cout << "Left map\n";
373 // DataTagged::DataMapType::const_iterator i,j;
374 // for (i=mapleft.begin();i!=mapleft.end();++i) {
375 // cout << "(" << i->first << "=>" << i->second << ")\n";
376 // }
377 // cout << "Right map\n";
378 // for (i=mapright.begin();i!=mapright.end();++i) {
379 // cout << "(" << i->first << "=>" << i->second << ")\n";
380 // }
381 // cout << "End map\n";
382 //
383 // }
384 //
385 // DataTagged::DataMapType::const_iterator i,j;
386 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
387 // if ((i->first!=j->first) || (i->second!=j->second))
388 // {
389 // cout << "(" << i->first << "=>" << i->second << ")";
390 // cout << ":(" << j->first << "=>" << j->second << ") ";
391 // mismatch=true;
392 // }
393 // }
394 // if (mismatch)
395 // {
396 // cout << "#Mismatch\n";
397 // }
398 // }
399
400 escriptDataC
401 Data::getDataC()
402 {
403 escriptDataC temp;
404 temp.m_dataPtr=(void*)this;
405 return temp;
406 }
407
408 escriptDataC
409 Data::getDataC() const
410 {
411 escriptDataC temp;
412 temp.m_dataPtr=(void*)this;
413 return temp;
414 }
415
416 const boost::python::tuple
417 Data::getShapeTuple() const
418 {
419 const DataTypes::ShapeType& shape=getDataPointShape();
420 switch(getDataPointRank()) {
421 case 0:
422 return make_tuple();
423 case 1:
424 return make_tuple(long_(shape[0]));
425 case 2:
426 return make_tuple(long_(shape[0]),long_(shape[1]));
427 case 3:
428 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
429 case 4:
430 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
431 default:
432 throw DataException("Error - illegal Data rank.");
433 }
434 }
435
436
437 // The different name is needed because boost has trouble with overloaded functions.
438 // It can't work out what type the function is based solely on its name.
439 // There are ways to fix this involving creating function pointer variables for each form,
440 // but there doesn't seem to be a need, given that the methods have the same name from the Python point of view.
441 Data
442 Data::copySelf()
443 {
444 DataAbstract* temp=m_data->deepCopy();
445 return Data(temp);
446 }
447
448 void
449 Data::copy(const Data& other)
450 {
451 DataAbstract* temp=other.m_data->deepCopy();
452 DataAbstract_ptr p=temp->getPtr();
453 m_data=p;
454 }
455
456
457 Data
458 Data::delay()
459 {
460 DataLazy* dl=new DataLazy(m_data);
461 return Data(dl);
462 }
463
464 void
465 Data::delaySelf()
466 {
467 if (!isLazy())
468 {
469 m_data=(new DataLazy(m_data))->getPtr();
470 }
471 }
472
473 void
474 Data::setToZero()
475 {
476 if (isEmpty())
477 {
478 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
479 }
480 m_data->setToZero();
481 }
482
483 void
484 Data::copyWithMask(const Data& other,
485 const Data& mask)
486 {
487 // 1. Interpolate if required so all Data objects use the same FunctionSpace as this one
488 // 2. Tag or Expand so that all Data objects have the same underlying type
489 // 3. Iterate over the data vectors, copying values where the mask is >0
490 if (other.isEmpty() || mask.isEmpty())
491 {
492 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
493 }
494 Data other2(other);
495 Data mask2(mask);
496 other2.resolve();
497 mask2.resolve();
498 this->resolve();
499 FunctionSpace myFS=getFunctionSpace();
500 FunctionSpace oFS=other2.getFunctionSpace();
501 FunctionSpace mFS=mask2.getFunctionSpace();
502 if (oFS!=myFS)
503 {
504 if (other2.probeInterpolation(myFS))
505 {
506 other2=other2.interpolate(myFS);
507 }
508 else
509 {
510 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
511 }
512 }
513 if (mFS!=myFS)
514 {
515 if (mask2.probeInterpolation(myFS))
516 {
517 mask2=mask2.interpolate(myFS);
518 }
519 else
520 {
521 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
522 }
523 }
524 // Ensure that all args have the same type
525 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
526 {
527 this->expand();
528 other2.expand();
529 mask2.expand();
530 }
531 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
532 {
533 this->tag();
534 other2.tag();
535 mask2.tag();
536 }
537 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
538 {
539 }
540 else
541 {
542 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
543 }
544 // Now we iterate over the elements
545 DataVector& self=getReadyPtr()->getVector();
546 const DataVector& ovec=other2.getReadyPtr()->getVector();
547 const DataVector& mvec=mask2.getReadyPtr()->getVector();
548 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
549 {
550 throw DataException("Error - size mismatch in arguments to copyWithMask.");
551 }
552 size_t num_points=self.size();
553
554 // OPENMP 3.0 allows unsigned loop vars.
555 #if defined(_OPENMP) && (_OPENMP < 200805)
556 long i;
557 #else
558 size_t i;
559 #endif
560 #pragma omp parallel for private(i) schedule(static)
561 for (i=0;i<num_points;++i)
562 {
563 if (mvec[i]>0)
564 {
565 self[i]=ovec[i];
566 }
567 }
568 }
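// Usage sketch: given Data d, src and mask m on compatible FunctionSpaces,
//   d.copyWithMask(src, m);
// interpolates src and m onto d's FunctionSpace if necessary, promotes all three to a
// common representation (expanded or tagged), then overwrites d's entries with src's
// wherever the corresponding mask entry is > 0.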
569
570
571
572 bool
573 Data::isExpanded() const
574 {
575 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
576 return (temp!=0);
577 }
578
579 bool
580 Data::isTagged() const
581 {
582 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
583 return (temp!=0);
584 }
585
586 bool
587 Data::isEmpty() const
588 {
589 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
590 return (temp!=0);
591 }
592
593 bool
594 Data::isConstant() const
595 {
596 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
597 return (temp!=0);
598 }
599
600 bool
601 Data::isLazy() const
602 {
603 return m_data->isLazy();
604 }
605
606 // at the moment this is synonymous with !isLazy() but that could change
607 bool
608 Data::isReady() const
609 {
610 return (dynamic_cast<DataReady*>(m_data.get())!=0);
611 }
612
613
614 void
615 Data::setProtection()
616 {
617 m_protected=true;
618 }
619
620 bool
621 Data::isProtected() const
622 {
623 return m_protected;
624 }
625
626
627
628 void
629 Data::expand()
630 {
631 if (isConstant()) {
632 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
633 DataAbstract* temp=new DataExpanded(*tempDataConst);
634 // shared_ptr<DataAbstract> temp_data(temp);
635 // m_data=temp_data;
636 m_data=temp->getPtr();
637 } else if (isTagged()) {
638 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
639 DataAbstract* temp=new DataExpanded(*tempDataTag);
640 // shared_ptr<DataAbstract> temp_data(temp);
641 // m_data=temp_data;
642 m_data=temp->getPtr();
643 } else if (isExpanded()) {
644 //
645 // do nothing
646 } else if (isEmpty()) {
647 throw DataException("Error - Expansion of DataEmpty not possible.");
648 } else if (isLazy()) {
649 resolve();
650 expand(); // resolve might not give us expanded data
651 } else {
652 throw DataException("Error - Expansion not implemented for this Data type.");
653 }
654 }
655
656 void
657 Data::tag()
658 {
659 if (isConstant()) {
660 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
661 DataAbstract* temp=new DataTagged(*tempDataConst);
662 // shared_ptr<DataAbstract> temp_data(temp);
663 // m_data=temp_data;
664 m_data=temp->getPtr();
665 } else if (isTagged()) {
666 // do nothing
667 } else if (isExpanded()) {
668 throw DataException("Error - Creating tag data from DataExpanded not possible.");
669 } else if (isEmpty()) {
670 throw DataException("Error - Creating tag data from DataEmpty not possible.");
671 } else if (isLazy()) {
672 DataAbstract_ptr res=m_data->resolve();
673 if (m_data->isExpanded())
674 {
675 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
676 }
677 m_data=res;
678 tag();
679 } else {
680 throw DataException("Error - Tagging not implemented for this Data type.");
681 }
682 }
683
684 void
685 Data::resolve()
686 {
687 if (isLazy())
688 {
689 m_data=m_data->resolve();
690 }
691 }
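// Summary of the representations used above: a Data object wraps one of DataEmpty,
// DataConstant, DataTagged, DataExpanded or DataLazy. expand() promotes constant/tagged
// data to expanded, tag() promotes constant data to tagged, and resolve() evaluates a
// lazy expression back to one of the "ready" forms.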
692
693
694 Data
695 Data::oneOver() const
696 {
697 MAKELAZYOP(RECIP)
698 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
699 }
700
701 Data
702 Data::wherePositive() const
703 {
704 MAKELAZYOP(GZ)
705 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
706 }
707
708 Data
709 Data::whereNegative() const
710 {
711 MAKELAZYOP(LZ)
712 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
713 }
714
715 Data
716 Data::whereNonNegative() const
717 {
718 MAKELAZYOP(GEZ)
719 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
720 }
721
722 Data
723 Data::whereNonPositive() const
724 {
725 MAKELAZYOP(LEZ)
726 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
727 }
728
729 Data
730 Data::whereZero(double tol) const
731 {
732 // Data dataAbs=abs();
733 // return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
734 MAKELAZYOPOFF(EZ,tol)
735 return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
736
737 }
738
739 Data
740 Data::whereNonZero(double tol) const
741 {
742 // Data dataAbs=abs();
743 // return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
744 MAKELAZYOPOFF(NEZ,tol)
745 return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
746
747 }
748
749 Data
750 Data::interpolate(const FunctionSpace& functionspace) const
751 {
752 return Data(*this,functionspace);
753 }
754
755 bool
756 Data::probeInterpolation(const FunctionSpace& functionspace) const
757 {
758 return getFunctionSpace().probeInterpolation(functionspace);
759 }
760
761 Data
762 Data::gradOn(const FunctionSpace& functionspace) const
763 {
764 if (isEmpty())
765 {
766 throw DataException("Error - operation not permitted on instances of DataEmpty.");
767 }
768 double blocktimer_start = blocktimer_time();
769 if (functionspace.getDomain()!=getDomain())
770 throw DataException("Error - gradient cannot be calculated on different domains.");
771 DataTypes::ShapeType grad_shape=getDataPointShape();
772 grad_shape.push_back(functionspace.getDim());
773 Data out(0.0,grad_shape,functionspace,true);
774 getDomain()->setToGradient(out,*this);
775 blocktimer_increment("grad()", blocktimer_start);
776 return out;
777 }
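// Shape note: the gradient appends the spatial dimension of the target FunctionSpace to
// the data point shape, e.g. a scalar on a 3D domain yields shape (3,) and a (3,)-vector
// yields shape (3,3).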
778
779 Data
780 Data::grad() const
781 {
782 if (isEmpty())
783 {
784 throw DataException("Error - operation not permitted on instances of DataEmpty.");
785 }
786 return gradOn(escript::function(*getDomain()));
787 }
788
789 int
790 Data::getDataPointSize() const
791 {
792 return m_data->getNoValues();
793 }
794
795 DataTypes::ValueType::size_type
796 Data::getLength() const
797 {
798 return m_data->getLength();
799 }
800
801 const
802 boost::python::numeric::array
803 Data::getValueOfDataPoint(int dataPointNo)
804 {
805 int i, j, k, l;
806
807 FORCERESOLVE;
808
809 //
810 // determine the rank and shape of each data point
811 int dataPointRank = getDataPointRank();
812 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
813
814 //
815 // create the numeric array to be returned
816 boost::python::numeric::array numArray(0.0);
817
818 //
819 // the shape of the returned numeric array will be the same
820 // as that of the data point
821 int arrayRank = dataPointRank;
822 const DataTypes::ShapeType& arrayShape = dataPointShape;
823
824 //
825 // resize the numeric array to the shape just calculated
826 if (arrayRank==0) {
827 numArray.resize(1);
828 }
829 if (arrayRank==1) {
830 numArray.resize(arrayShape[0]);
831 }
832 if (arrayRank==2) {
833 numArray.resize(arrayShape[0],arrayShape[1]);
834 }
835 if (arrayRank==3) {
836 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
837 }
838 if (arrayRank==4) {
839 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
840 }
841
842 if (getNumDataPointsPerSample()>0) {
843 int sampleNo = dataPointNo/getNumDataPointsPerSample();
844 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
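// e.g. with 4 data points per sample, dataPointNo 10 decomposes into sampleNo 2 and
// dataPointNoInSample 2, since dataPointNo = sampleNo*pointsPerSample + dataPointNoInSample.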
845 //
846 // Check a valid sample number has been supplied
847 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
848 throw DataException("Error - Data::getValueOfDataPoint: invalid sampleNo.");
849 }
850
851 //
852 // Check a valid data point number has been supplied
853 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
854 throw DataException("Error - Data::getValueOfDataPoint: invalid dataPointNoInSample.");
855 }
856 // TODO: global error handling
857 // create a view of the data if it is stored locally
858 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
859 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
860
861
862 switch( dataPointRank ){
863 case 0 :
864 numArray[0] = getDataAtOffset(offset);
865 break;
866 case 1 :
867 for( i=0; i<dataPointShape[0]; i++ )
868 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
869 break;
870 case 2 :
871 for( i=0; i<dataPointShape[0]; i++ )
872 for( j=0; j<dataPointShape[1]; j++)
873 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
874 break;
875 case 3 :
876 for( i=0; i<dataPointShape[0]; i++ )
877 for( j=0; j<dataPointShape[1]; j++ )
878 for( k=0; k<dataPointShape[2]; k++)
879 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
880 break;
881 case 4 :
882 for( i=0; i<dataPointShape[0]; i++ )
883 for( j=0; j<dataPointShape[1]; j++ )
884 for( k=0; k<dataPointShape[2]; k++ )
885 for( l=0; l<dataPointShape[3]; l++)
886 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
887 break;
888 }
889 }
890 //
891 // return the array
892 return numArray;
893 }
894
895 void
896 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
897 {
898 // this will throw if the value cannot be represented
899 boost::python::numeric::array num_array(py_object);
900 setValueOfDataPointToArray(dataPointNo,num_array);
901 }
902
903 void
904 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
905 {
906 if (isProtected()) {
907 throw DataException("Error - attempt to update protected Data object.");
908 }
909 FORCERESOLVE;
910 //
911 // check rank
912 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
913 throw DataException("Rank of numarray does not match Data object rank");
914
915 //
916 // check shape of num_array
917 for (unsigned int i=0; i<getDataPointRank(); i++) {
918 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
919 throw DataException("Shape of numarray does not match Data object shape");
920 }
921 //
922 // make sure data is expanded:
923 //
924 if (!isExpanded()) {
925 expand();
926 }
927 if (getNumDataPointsPerSample()>0) {
928 int sampleNo = dataPointNo/getNumDataPointsPerSample();
929 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
930 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
931 } else {
932 m_data->copyToDataPoint(-1, 0,num_array);
933 }
934 }
935
936 void
937 Data::setValueOfDataPoint(int dataPointNo, const double value)
938 {
939 if (isProtected()) {
940 throw DataException("Error - attempt to update protected Data object.");
941 }
942 //
943 // make sure data is expanded:
944 FORCERESOLVE;
945 if (!isExpanded()) {
946 expand();
947 }
948 if (getNumDataPointsPerSample()>0) {
949 int sampleNo = dataPointNo/getNumDataPointsPerSample();
950 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
951 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
952 } else {
953 m_data->copyToDataPoint(-1, 0,value);
954 }
955 }
956
957 const
958 boost::python::numeric::array
959 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
960 {
961 size_t length=0;
962 int i, j, k, l, pos;
963 FORCERESOLVE;
964 //
965 // determine the rank and shape of each data point
966 int dataPointRank = getDataPointRank();
967 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
968
969 //
970 // create the numeric array to be returned
971 boost::python::numeric::array numArray(0.0);
972
973 //
974 // the shape of the returned numeric array will be the same
975 // as that of the data point
976 int arrayRank = dataPointRank;
977 const DataTypes::ShapeType& arrayShape = dataPointShape;
978
979 //
980 // resize the numeric array to the shape just calculated
981 if (arrayRank==0) {
982 numArray.resize(1);
983 }
984 if (arrayRank==1) {
985 numArray.resize(arrayShape[0]);
986 }
987 if (arrayRank==2) {
988 numArray.resize(arrayShape[0],arrayShape[1]);
989 }
990 if (arrayRank==3) {
991 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
992 }
993 if (arrayRank==4) {
994 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
995 }
996
997 // added for the MPI communication
998 length=1;
999 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1000 double *tmpData = new double[length];
1001
1002 //
1003 // load the values for the data point into the numeric array.
1004
1005 // updated for the MPI case
1006 if( get_MPIRank()==procNo ){
1007 if (getNumDataPointsPerSample()>0) {
1008 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1009 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1010 //
1011 // Check a valid sample number has been supplied
1012 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1013 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid sampleNo.");
1014 }
1015
1016 //
1017 // Check a valid data point number has been supplied
1018 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1019 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid dataPointNoInSample.");
1020 }
1021 // TODO: global error handling
1022 // create a view of the data if it is stored locally
1023 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1024 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1025
1026 // pack the data from the view into tmpData for MPI communication
1027 pos=0;
1028 switch( dataPointRank ){
1029 case 0 :
1030 tmpData[0] = getDataAtOffset(offset);
1031 break;
1032 case 1 :
1033 for( i=0; i<dataPointShape[0]; i++ )
1034 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1035 break;
1036 case 2 :
1037 for( i=0; i<dataPointShape[0]; i++ )
1038 for( j=0; j<dataPointShape[1]; j++, pos++ )
1039 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1040 break;
1041 case 3 :
1042 for( i=0; i<dataPointShape[0]; i++ )
1043 for( j=0; j<dataPointShape[1]; j++ )
1044 for( k=0; k<dataPointShape[2]; k++, pos++ )
1045 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1046 break;
1047 case 4 :
1048 for( i=0; i<dataPointShape[0]; i++ )
1049 for( j=0; j<dataPointShape[1]; j++ )
1050 for( k=0; k<dataPointShape[2]; k++ )
1051 for( l=0; l<dataPointShape[3]; l++, pos++ )
1052 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1053 break;
1054 }
1055 }
1056 }
1057 #ifdef PASO_MPI
1058 // broadcast the data to all other processes
1059 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1060 #endif
1061
1062 // unpack the data
1063 switch( dataPointRank ){
1064 case 0 :
1065 numArray[0]=tmpData[0];
1066 break;
1067 case 1 :
1068 for( i=0; i<dataPointShape[0]; i++ )
1069 numArray[i]=tmpData[i];
1070 break;
1071 case 2 :
1072 for( i=0; i<dataPointShape[0]; i++ )
1073 for( j=0; j<dataPointShape[1]; j++ )
1074 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1075 break;
1076 case 3 :
1077 for( i=0; i<dataPointShape[0]; i++ )
1078 for( j=0; j<dataPointShape[1]; j++ )
1079 for( k=0; k<dataPointShape[2]; k++ )
1080 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1081 break;
1082 case 4 :
1083 for( i=0; i<dataPointShape[0]; i++ )
1084 for( j=0; j<dataPointShape[1]; j++ )
1085 for( k=0; k<dataPointShape[2]; k++ )
1086 for( l=0; l<dataPointShape[3]; l++ )
1087 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1088 break;
1089 }
1090
1091 delete [] tmpData;
1092 //
1093 // return the loaded array
1094 return numArray;
1095 }
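// Note on the MPI pattern above: only the rank that owns the point (procNo) packs its
// values into tmpData; MPI_Bcast then distributes that buffer from procNo to every rank,
// so all processes return an identically filled numeric array.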
1096
1097
1098 boost::python::numeric::array
1099 Data::integrate_const() const
1100 {
1101 if (isLazy())
1102 {
1103 throw DataException("Error - cannot integrate for constant lazy data.");
1104 }
1105 return integrateWorker();
1106 }
1107
1108 boost::python::numeric::array
1109 Data::integrate()
1110 {
1111 if (isLazy())
1112 {
1113 expand();
1114 }
1115 return integrateWorker();
1116 }
1117
1118
1119
1120 boost::python::numeric::array
1121 Data::integrateWorker() const
1122 {
1123 int index;
1124 int rank = getDataPointRank();
1125 DataTypes::ShapeType shape = getDataPointShape();
1126 int dataPointSize = getDataPointSize();
1127
1128 //
1129 // calculate the integral values
1130 vector<double> integrals(dataPointSize);
1131 vector<double> integrals_local(dataPointSize);
1132 const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
1133 if (dom==0)
1134 {
1135 throw DataException("Can not integrate over non-continuous domains.");
1136 }
1137 #ifdef PASO_MPI
1138 dom->setToIntegrals(integrals_local,*this);
1139 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1140 double *tmp = new double[dataPointSize];
1141 double *tmp_local = new double[dataPointSize];
1142 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1143 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1144 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1145 delete[] tmp;
1146 delete[] tmp_local;
1147 #else
1148 dom->setToIntegrals(integrals,*this);
1149 #endif
1150
1151 //
1152 // create the numeric array to be returned
1153 // and load the array with the integral values
1154 boost::python::numeric::array bp_array(1.0);
1155 if (rank==0) {
1156 bp_array.resize(1);
1157 index = 0;
1158 bp_array[0] = integrals[index];
1159 }
1160 if (rank==1) {
1161 bp_array.resize(shape[0]);
1162 for (int i=0; i<shape[0]; i++) {
1163 index = i;
1164 bp_array[i] = integrals[index];
1165 }
1166 }
1167 if (rank==2) {
1168 bp_array.resize(shape[0],shape[1]);
1169 for (int i=0; i<shape[0]; i++) {
1170 for (int j=0; j<shape[1]; j++) {
1171 index = i + shape[0] * j;
1172 bp_array[make_tuple(i,j)] = integrals[index];
1173 }
1174 }
1175 }
1176 if (rank==3) {
1177 bp_array.resize(shape[0],shape[1],shape[2]);
1178 for (int i=0; i<shape[0]; i++) {
1179 for (int j=0; j<shape[1]; j++) {
1180 for (int k=0; k<shape[2]; k++) {
1181 index = i + shape[0] * ( j + shape[1] * k );
1182 bp_array[make_tuple(i,j,k)] = integrals[index];
1183 }
1184 }
1185 }
1186 }
1187 if (rank==4) {
1188 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1189 for (int i=0; i<shape[0]; i++) {
1190 for (int j=0; j<shape[1]; j++) {
1191 for (int k=0; k<shape[2]; k++) {
1192 for (int l=0; l<shape[3]; l++) {
1193 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1194 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1195 }
1196 }
1197 }
1198 }
1199 }
1200
1201 //
1202 // return the loaded array
1203 return bp_array;
1204 }
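// Usage sketch: for a rank-2 Data d,
//   boost::python::numeric::array I = d.integrate();
// returns an array of the same shape whose (i,j) entry is the integral of component (i,j)
// over the domain, summed across ranks when PASO_MPI is enabled.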
1205
1206 Data
1207 Data::sin() const
1208 {
1209 MAKELAZYOP(SIN)
1210 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1211 }
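// The pointwise functions below (cos .. sqrt) follow the same pattern as sin():
// MAKELAZYOP records the operation when lazy evaluation applies, otherwise
// C_TensorUnaryOperation applies the corresponding C library function to every value.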
1212
1213 Data
1214 Data::cos() const
1215 {
1216 MAKELAZYOP(COS)
1217 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1218 }
1219
1220 Data
1221 Data::tan() const
1222 {
1223 MAKELAZYOP(TAN)
1224 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1225 }
1226
1227 Data
1228 Data::asin() const
1229 {
1230 MAKELAZYOP(ASIN)
1231 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1232 }
1233
1234 Data
1235 Data::acos() const
1236 {
1237 MAKELAZYOP(ACOS)
1238 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1239 }
1240
1241
1242 Data
1243 Data::atan() const
1244 {
1245 MAKELAZYOP(ATAN)
1246 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1247 }
1248
1249 Data
1250 Data::sinh() const
1251 {
1252 MAKELAZYOP(SINH)
1253 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1254 }
1255
1256 Data
1257 Data::cosh() const
1258 {
1259 MAKELAZYOP(COSH)
1260 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1261 }
1262
1263 Data
1264 Data::tanh() const
1265 {
1266 MAKELAZYOP(TANH)
1267 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1268 }
1269
1270
1271 Data
1272 Data::erf() const
1273 {
1274 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1275 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1276 #else
1277 MAKELAZYOP(ERF)
1278 return C_TensorUnaryOperation(*this, ::erf);
1279 #endif
1280 }
1281
1282 Data
1283 Data::asinh() const
1284 {
1285 MAKELAZYOP(ASINH)
1286 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1287 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1288 #else
1289 return C_TensorUnaryOperation(*this, ::asinh);
1290 #endif
1291 }
1292
1293 Data
1294 Data::acosh() const
1295 {
1296 MAKELAZYOP(ACOSH)
1297 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1298 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1299 #else
1300 return C_TensorUnaryOperation(*this, ::acosh);
1301 #endif
1302 }
1303
1304 Data
1305 Data::atanh() const
1306 {
1307 MAKELAZYOP(ATANH)
1308 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1309 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1310 #else
1311 return C_TensorUnaryOperation(*this, ::atanh);
1312 #endif
1313 }
1314
1315 Data
1316 Data::log10() const
1317 {
1318 MAKELAZYOP(LOG10)
1319 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1320 }
1321
1322 Data
1323 Data::log() const
1324 {
1325 MAKELAZYOP(LOG)
1326 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1327 }
1328
1329 Data
1330 Data::sign() const
1331 {
1332 MAKELAZYOP(SIGN)
1333 return C_TensorUnaryOperation(*this, escript::fsign);
1334 }
1335
1336 Data
1337 Data::abs() const
1338 {
1339 MAKELAZYOP(ABS)
1340 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1341 }
1342
1343 Data
1344 Data::neg() const
1345 {
1346 MAKELAZYOP(NEG)
1347 return C_TensorUnaryOperation(*this, negate<double>());
1348 }
1349
1350 Data
1351 Data::pos() const
1352 {
1353 // not doing lazy check here is deliberate.
1354 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1355 Data result;
1356 // perform a deep copy
1357 result.copy(*this);
1358 return result;
1359 }
1360
1361 Data
1362 Data::exp() const
1363 {
1364 MAKELAZYOP(EXP)
1365 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1366 }
1367
1368 Data
1369 Data::sqrt() const
1370 {
1371 MAKELAZYOP(SQRT)
1372 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1373 }
1374
1375 double
1376 Data::Lsup_const() const
1377 {
1378 if (isLazy())
1379 {
1380 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1381 }
1382 return LsupWorker();
1383 }
1384
1385 double
1386 Data::Lsup()
1387 {
1388 if (isLazy())
1389 {
1390 resolve();
1391 }
1392 return LsupWorker();
1393 }
1394
1395 double
1396 Data::sup_const() const
1397 {
1398 if (isLazy())
1399 {
1400 throw DataException("Error - cannot compute sup for constant lazy data.");
1401 }
1402 return supWorker();
1403 }
1404
1405 double
1406 Data::sup()
1407 {
1408 if (isLazy())
1409 {
1410 resolve();
1411 }
1412 return supWorker();
1413 }
1414
1415 double
1416 Data::inf_const() const
1417 {
1418 if (isLazy())
1419 {
1420 throw DataException("Error - cannot compute inf for constant lazy data.");
1421 }
1422 return infWorker();
1423 }
1424
1425 double
1426 Data::inf()
1427 {
1428 if (isLazy())
1429 {
1430 resolve();
1431 }
1432 return infWorker();
1433 }
1434
1435 double
1436 Data::LsupWorker() const
1437 {
1438 double localValue;
1439 //
1440 // set the initial absolute maximum value to zero
1441
1442 AbsMax abs_max_func;
1443 localValue = algorithm(abs_max_func,0);
1444 #ifdef PASO_MPI
1445 double globalValue;
1446 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1447 return globalValue;
1448 #else
1449 return localValue;
1450 #endif
1451 }
1452
1453 double
1454 Data::supWorker() const
1455 {
1456 double localValue;
1457 //
1458 // set the initial maximum value to min possible double
1459 FMax fmax_func;
1460 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1461 #ifdef PASO_MPI
1462 double globalValue;
1463 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1464 return globalValue;
1465 #else
1466 return localValue;
1467 #endif
1468 }
1469
1470 double
1471 Data::infWorker() const
1472 {
1473 double localValue;
1474 //
1475 // set the initial minimum value to max possible double
1476 FMin fmin_func;
1477 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1478 #ifdef PASO_MPI
1479 double globalValue;
1480 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1481 return globalValue;
1482 #else
1483 return localValue;
1484 #endif
1485 }
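// Note: Lsup/sup/inf reduce locally via algorithm() (Lsup over absolute values, sup
// starting from -DBL_MAX, inf starting from DBL_MAX) and, under PASO_MPI, combine the
// local results with MPI_Allreduce(MAX/MIN) so every rank sees the global value.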
1486
1487 /* TODO */
1488 /* global reduction */
1489 Data
1490 Data::maxval() const
1491 {
1492 if (isLazy())
1493 {
1494 Data temp(*this); // to get around the fact that you can't resolve a const Data
1495 temp.resolve();
1496 return temp.maxval();
1497 }
1498 //
1499 // set the initial maximum value to min possible double
1500 FMax fmax_func;
1501 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1502 }
1503
1504 Data
1505 Data::minval() const
1506 {
1507 if (isLazy())
1508 {
1509 Data temp(*this); // to get around the fact that you can't resolve a const Data
1510 temp.resolve();
1511 return temp.minval();
1512 }
1513 //
1514 // set the initial minimum value to max possible double
1515 FMin fmin_func;
1516 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1517 }
1518
1519 Data
1520 Data::swapaxes(const int axis0, const int axis1) const
1521 {
1522 if (isLazy())
1523 {
1524 Data temp(*this);
1525 temp.resolve();
1526 return temp.swapaxes(axis0,axis1);
1527 }
1528 int axis0_tmp,axis1_tmp;
1529 DataTypes::ShapeType s=getDataPointShape();
1530 DataTypes::ShapeType ev_shape;
1531 // Build the output shape by exchanging the entries for axis0 and axis1;
1532 // all other dimensions keep their original positions.
1533 int rank=getDataPointRank();
1534 if (rank<2) {
1535 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1536 }
1537 if (axis0<0 || axis0>rank-1) {
1538 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
1539 }
1540 if (axis1<0 || axis1>rank-1) {
1541 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1542 }
1543 if (axis0 == axis1) {
1544 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1545 }
1546 if (axis0 > axis1) {
1547 axis0_tmp=axis1;
1548 axis1_tmp=axis0;
1549 } else {
1550 axis0_tmp=axis0;
1551 axis1_tmp=axis1;
1552 }
1553 for (int i=0; i<rank; i++) {
1554 if (i == axis0_tmp) {
1555 ev_shape.push_back(s[axis1_tmp]);
1556 } else if (i == axis1_tmp) {
1557 ev_shape.push_back(s[axis0_tmp]);
1558 } else {
1559 ev_shape.push_back(s[i]);
1560 }
1561 }
1562 Data ev(0.,ev_shape,getFunctionSpace());
1563 ev.typeMatchRight(*this);
1564 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1565 return ev;
1566
1567 }
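// Example: for a data point shape (3,4,5), swapaxes(0,2) produces shape (5,4,3); the
// values themselves are rearranged by the underlying m_data->swapaxes call.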
1568
1569 Data
1570 Data::symmetric() const
1571 {
1572 // check input
1573 DataTypes::ShapeType s=getDataPointShape();
1574 if (getDataPointRank()==2) {
1575 if(s[0] != s[1])
1576 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1577 }
1578 else if (getDataPointRank()==4) {
1579 if(!(s[0] == s[2] && s[1] == s[3]))
1580 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1581 }
1582 else {
1583 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1584 }
1585 MAKELAZYOP(SYM)
1586 Data ev(0.,getDataPointShape(),getFunctionSpace());
1587 ev.typeMatchRight(*this);
1588 m_data->symmetric(ev.m_data.get());
1589 return ev;
1590 }
1591
1592 Data
1593 Data::nonsymmetric() const
1594 {
1595 MAKELAZYOP(NSYM)
1596 // check input
1597 DataTypes::ShapeType s=getDataPointShape();
1598 if (getDataPointRank()==2) {
1599 if(s[0] != s[1])
1600 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1601 DataTypes::ShapeType ev_shape;
1602 ev_shape.push_back(s[0]);
1603 ev_shape.push_back(s[1]);
1604 Data ev(0.,ev_shape,getFunctionSpace());
1605 ev.typeMatchRight(*this);
1606 m_data->nonsymmetric(ev.m_data.get());
1607 return ev;
1608 }
1609 else if (getDataPointRank()==4) {
1610 if(!(s[0] == s[2] && s[1] == s[3]))
1611 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1612 DataTypes::ShapeType ev_shape;
1613 ev_shape.push_back(s[0]);
1614 ev_shape.push_back(s[1]);
1615 ev_shape.push_back(s[2]);
1616 ev_shape.push_back(s[3]);
1617 Data ev(0.,ev_shape,getFunctionSpace());
1618 ev.typeMatchRight(*this);
1619 m_data->nonsymmetric(ev.m_data.get());
1620 return ev;
1621 }
1622 else {
1623 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1624 }
1625 }
1626
1627 Data
1628 Data::trace(int axis_offset) const
1629 {
1630 MAKELAZYOPOFF(TRACE,axis_offset)
1631 if ((axis_offset<0) || (axis_offset>getDataPointRank()))
1632 {
1633 throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
1634 }
1635 DataTypes::ShapeType s=getDataPointShape();
1636 if (getDataPointRank()==2) {
1637 DataTypes::ShapeType ev_shape;
1638 Data ev(0.,ev_shape,getFunctionSpace());
1639 ev.typeMatchRight(*this);
1640 m_data->trace(ev.m_data.get(), axis_offset);
1641 return ev;
1642 }
1643 if (getDataPointRank()==3) {
1644 DataTypes::ShapeType ev_shape;
1645 if (axis_offset==0) {
1646 int s2=s[2];
1647 ev_shape.push_back(s2);
1648 }
1649 else if (axis_offset==1) {
1650 int s0=s[0];
1651 ev_shape.push_back(s0);
1652 }
1653 Data ev(0.,ev_shape,getFunctionSpace());
1654 ev.typeMatchRight(*this);
1655 m_data->trace(ev.m_data.get(), axis_offset);
1656 return ev;
1657 }
1658 if (getDataPointRank()==4) {
1659 DataTypes::ShapeType ev_shape;
1660 if (axis_offset==0) {
1661 ev_shape.push_back(s[2]);
1662 ev_shape.push_back(s[3]);
1663 }
1664 else if (axis_offset==1) {
1665 ev_shape.push_back(s[0]);
1666 ev_shape.push_back(s[3]);
1667 }
1668 else if (axis_offset==2) {
1669 ev_shape.push_back(s[0]);
1670 ev_shape.push_back(s[1]);
1671 }
1672 Data ev(0.,ev_shape,getFunctionSpace());
1673 ev.typeMatchRight(*this);
1674 m_data->trace(ev.m_data.get(), axis_offset);
1675 return ev;
1676 }
1677 else {
1678 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1679 }
1680 }
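// Shape summary: trace contracts the pair of axes selected by axis_offset, so a rank-2
// object gives a scalar, rank 3 gives shape (s[2],) for axis_offset 0 or (s[0],) for
// axis_offset 1, and rank 4 drops the contracted pair of dimensions in the same way.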
1681
1682 Data
1683 Data::transpose(int axis_offset) const
1684 {
1685 MAKELAZYOPOFF(TRANS,axis_offset)
1686 DataTypes::ShapeType s=getDataPointShape();
1687 DataTypes::ShapeType ev_shape;
1688 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1689 // which walks through all shape vector elements starting at axis_offset, wrapping back to index 0 once the index reaches rank
1690 int rank=getDataPointRank();
1691 if (axis_offset<0 || axis_offset>rank) {
1692 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1693 }
1694 for (int i=0; i<rank; i++) {
1695 int index = (axis_offset+i)%rank;
1696 ev_shape.push_back(s[index]); // Append to new shape
1697 }
1698 Data ev(0.,ev_shape,getFunctionSpace());
1699 ev.typeMatchRight(*this);
1700 m_data->transpose(ev.m_data.get(), axis_offset);
1701 return ev;
1702 }
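// Example: transpose cycles the shape as described in the comment above; a data point
// shape (2,3,4) with axis_offset 1 becomes (3,4,2).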
1703
1704 Data
1705 Data::eigenvalues() const
1706 {
1707 if (isLazy())
1708 {
1709 Data temp(*this); // to get around the fact that you can't resolve a const Data
1710 temp.resolve();
1711 return temp.eigenvalues();
1712 }
1713 // check input
1714 DataTypes::ShapeType s=getDataPointShape();
1715 if (getDataPointRank()!=2)
1716 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1717 if(s[0] != s[1])
1718 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1719 // create return
1720 DataTypes::ShapeType ev_shape(1,s[0]);
1721 Data ev(0.,ev_shape,getFunctionSpace());
1722 ev.typeMatchRight(*this);
1723 m_data->eigenvalues(ev.m_data.get());
1724 return ev;
1725 }
1726
1727 const boost::python::tuple
1728 Data::eigenvalues_and_eigenvectors(const double tol) const
1729 {
1730 if (isLazy())
1731 {
1732 Data temp(*this); // to get around the fact that you can't resolve a const Data
1733 temp.resolve();
1734 return temp.eigenvalues_and_eigenvectors(tol);
1735 }
1736 DataTypes::ShapeType s=getDataPointShape();
1737 if (getDataPointRank()!=2)
1738 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1739 if(s[0] != s[1])
1740 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1741 // create return
1742 DataTypes::ShapeType ev_shape(1,s[0]);
1743 Data ev(0.,ev_shape,getFunctionSpace());
1744 ev.typeMatchRight(*this);
1745 DataTypes::ShapeType V_shape(2,s[0]);
1746 Data V(0.,V_shape,getFunctionSpace());
1747 V.typeMatchRight(*this);
1748 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1749 return make_tuple(boost::python::object(ev),boost::python::object(V));
1750 }
1751
1752 const boost::python::tuple
1753 Data::minGlobalDataPoint() const
1754 {
1755 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() as boost::make_tuple causes an
1756 // abort (for unknown reasons) if there are OpenMP directives in the
1757 // surrounding function.
1758
1759 int DataPointNo;
1760 int ProcNo;
1761 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1762 return make_tuple(ProcNo,DataPointNo);
1763 }
1764
1765 void
1766 Data::calc_minGlobalDataPoint(int& ProcNo,
1767 int& DataPointNo) const
1768 {
1769 if (isLazy())
1770 {
1771 Data temp(*this); // to get around the fact that you can't resolve a const Data
1772 temp.resolve();
1773 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1774 }
1775 int i,j;
1776 int lowi=0,lowj=0;
1777 double min=numeric_limits<double>::max();
1778
1779 Data temp=minval();
1780
1781 int numSamples=temp.getNumSamples();
1782 int numDPPSample=temp.getNumDataPointsPerSample();
1783
1784 double next,local_min;
1785 int local_lowi=0,local_lowj=0;
1786
1787 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1788 {
1789 local_min=min;
1790 #pragma omp for private(i,j) schedule(static)
1791 for (i=0; i<numSamples; i++) {
1792 for (j=0; j<numDPPSample; j++) {
1793 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1794 if (next<local_min) {
1795 local_min=next;
1796 local_lowi=i;
1797 local_lowj=j;
1798 }
1799 }
1800 }
1801 #pragma omp critical
1802 if (local_min<min) {
1803 min=local_min;
1804 lowi=local_lowi;
1805 lowj=local_lowj;
1806 }
1807 }
1808
1809 #ifdef PASO_MPI
1810 // determine the processor on which the minimum occurs
1811 next = temp.getDataPoint(lowi,lowj);
1812 int lowProc = 0;
1813 double *globalMins = new double[get_MPISize()+1];
1814 int error;
1815 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1816
1817 if( get_MPIRank()==0 ){
1818 next = globalMins[lowProc];
1819 for( i=1; i<get_MPISize(); i++ )
1820 if( next>globalMins[i] ){
1821 lowProc = i;
1822 next = globalMins[i];
1823 }
1824 }
1825 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );
1826
1827 delete [] globalMins;
1828 ProcNo = lowProc;
1829 #else
1830 ProcNo = 0;
1831 #endif
1832 DataPointNo = lowj + lowi * numDPPSample;
1833 }
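// Note: the search above keeps per-thread (local_min, local_lowi, local_lowj) values and
// merges them in the omp critical section; under PASO_MPI the per-rank minima are gathered
// to rank 0, which selects the owning process and broadcasts its rank back to everyone.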
1834
1835 void
1836 Data::saveDX(std::string fileName) const
1837 {
1838 if (isEmpty())
1839 {
1840 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1841 }
1842 if (isLazy())
1843 {
1844 Data temp(*this); // to get around the fact that you can't resolve a const Data
1845 temp.resolve();
1846 temp.saveDX(fileName);
1847 return;
1848 }
1849 boost::python::dict args;
1850 args["data"]=boost::python::object(this);
1851 getDomain()->saveDX(fileName,args);
1852 return;
1853 }
1854
1855 void
1856 Data::saveVTK(std::string fileName) const
1857 {
1858 if (isEmpty())
1859 {
1860 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1861 }
1862 if (isLazy())
1863 {
1864 Data temp(*this); // to get around the fact that you can't resolve a const Data
1865 temp.resolve();
1866 temp.saveVTK(fileName);
1867 return;
1868 }
1869 boost::python::dict args;
1870 args["data"]=boost::python::object(this);
1871 getDomain()->saveVTK(fileName,args);
1872 return;
1873 }
1874
1875 Data&
1876 Data::operator+=(const Data& right)
1877 {
1878 if (isProtected()) {
1879 throw DataException("Error - attempt to update protected Data object.");
1880 }
1881 MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
1882 binaryOp(right,plus<double>());
1883 return (*this);
1884 }
1885
1886 Data&
1887 Data::operator+=(const boost::python::object& right)
1888 {
1889 if (isProtected()) {
1890 throw DataException("Error - attempt to update protected Data object.");
1891 }
1892 Data tmp(right,getFunctionSpace(),false);
1893 MAKELAZYBINSELF(tmp,ADD)
1894 binaryOp(tmp,plus<double>());
1895 return (*this);
1896 }
1897
1898 // Hmmm, operator= makes a deep copy but the copy constructor does not?
1899 Data&
1900 Data::operator=(const Data& other)
1901 {
1902 copy(other);
1903 return (*this);
1904 }
1905
1906 Data&
1907 Data::operator-=(const Data& right)
1908 {
1909 if (isProtected()) {
1910 throw DataException("Error - attempt to update protected Data object.");
1911 }
1912 MAKELAZYBINSELF(right,SUB)
1913 binaryOp(right,minus<double>());
1914 return (*this);
1915 }
1916
1917 Data&
1918 Data::operator-=(const boost::python::object& right)
1919 {
1920 if (isProtected()) {
1921 throw DataException("Error - attempt to update protected Data object.");
1922 }
1923 Data tmp(right,getFunctionSpace(),false);
1924 MAKELAZYBINSELF(tmp,SUB)
1925 binaryOp(tmp,minus<double>());
1926 return (*this);
1927 }
1928
1929 Data&
1930 Data::operator*=(const Data& right)
1931 {
1932 if (isProtected()) {
1933 throw DataException("Error - attempt to update protected Data object.");
1934 }
1935 MAKELAZYBINSELF(right,MUL)
1936 binaryOp(right,multiplies<double>());
1937 return (*this);
1938 }
1939
1940 Data&
1941 Data::operator*=(const boost::python::object& right)
1942 {
1943 if (isProtected()) {
1944 throw DataException("Error - attempt to update protected Data object.");
1945 }
1946 Data tmp(right,getFunctionSpace(),false);
1947 MAKELAZYBINSELF(tmp,MUL)
1948 binaryOp(tmp,multiplies<double>());
1949 return (*this);
1950 }
1951
1952 Data&
1953 Data::operator/=(const Data& right)
1954 {
1955 if (isProtected()) {
1956 throw DataException("Error - attempt to update protected Data object.");
1957 }
1958 MAKELAZYBINSELF(right,DIV)
1959 binaryOp(right,divides<double>());
1960 return (*this);
1961 }
1962
1963 Data&
1964 Data::operator/=(const boost::python::object& right)
1965 {
1966 if (isProtected()) {
1967 throw DataException("Error - attempt to update protected Data object.");
1968 }
1969 Data tmp(right,getFunctionSpace(),false);
1970 MAKELAZYBINSELF(tmp,DIV)
1971 binaryOp(tmp,divides<double>());
1972 return (*this);
1973 }
1974
1975 Data
1976 Data::rpowO(const boost::python::object& left) const
1977 {
1978 Data left_d(left,*this);
1979 return left_d.powD(*this);
1980 }
1981
1982 Data
1983 Data::powO(const boost::python::object& right) const
1984 {
1985 Data tmp(right,getFunctionSpace(),false);
1986 return powD(tmp);
1987 }
1988
1989 Data
1990 Data::powD(const Data& right) const
1991 {
1992 MAKELAZYBIN(right,POW)
1993 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
1994 }
1995
1996 //
1997 // NOTE: It is essential to specify the namespace this operator belongs to
1998 Data
1999 escript::operator+(const Data& left, const Data& right)
2000 {
2001 MAKELAZYBIN2(left,right,ADD)
2002 return C_TensorBinaryOperation(left, right, plus<double>());
2003 }
2004
2005 //
2006 // NOTE: It is essential to specify the namespace this operator belongs to
2007 Data
2008 escript::operator-(const Data& left, const Data& right)
2009 {
2010 MAKELAZYBIN2(left,right,SUB)
2011 return C_TensorBinaryOperation(left, right, minus<double>());
2012 }
2013
2014 //
2015 // NOTE: It is essential to specify the namespace this operator belongs to
2016 Data
2017 escript::operator*(const Data& left, const Data& right)
2018 {
2019 MAKELAZYBIN2(left,right,MUL)
2020 return C_TensorBinaryOperation(left, right, multiplies<double>());
2021 }
2022
2023 //
2024 // NOTE: It is essential to specify the namespace this operator belongs to
2025 Data
2026 escript::operator/(const Data& left, const Data& right)
2027 {
2028 MAKELAZYBIN2(left,right,DIV)
2029 return C_TensorBinaryOperation(left, right, divides<double>());
2030 }
2031
2032 //
2033 // NOTE: It is essential to specify the namespace this operator belongs to
2034 Data
2035 escript::operator+(const Data& left, const boost::python::object& right)
2036 {
2037 Data tmp(right,left.getFunctionSpace(),false);
2038 MAKELAZYBIN2(left,tmp,ADD)
2039 return left+tmp;
2040 }
2041
2042 //
2043 // NOTE: It is essential to specify the namespace this operator belongs to
2044 Data
2045 escript::operator-(const Data& left, const boost::python::object& right)
2046 {
2047 Data tmp(right,left.getFunctionSpace(),false);
2048 MAKELAZYBIN2(left,tmp,SUB)
2049 return left-tmp;
2050 }
2051
2052 //
2053 // NOTE: It is essential to specify the namespace this operator belongs to
2054 Data
2055 escript::operator*(const Data& left, const boost::python::object& right)
2056 {
2057 Data tmp(right,left.getFunctionSpace(),false);
2058 MAKELAZYBIN2(left,tmp,MUL)
2059 return left*tmp;
2060 }
2061
2062 //
2063 // NOTE: It is essential to specify the namespace this operator belongs to
2064 Data
2065 escript::operator/(const Data& left, const boost::python::object& right)
2066 {
2067 Data tmp(right,left.getFunctionSpace(),false);
2068 MAKELAZYBIN2(left,tmp,DIV)
2069 return left/tmp;
2070 }
2071
2072 //
2073 // NOTE: It is essential to specify the namespace this operator belongs to
2074 Data
2075 escript::operator+(const boost::python::object& left, const Data& right)
2076 {
2077 Data tmp(left,right.getFunctionSpace(),false);
2078 MAKELAZYBIN2(tmp,right,ADD)
2079 return tmp+right;
2080 }
2081
2082 //
2083 // NOTE: It is essential to specify the namespace this operator belongs to
2084 Data
2085 escript::operator-(const boost::python::object& left, const Data& right)
2086 {
2087 Data tmp(left,right.getFunctionSpace(),false);
2088 MAKELAZYBIN2(tmp,right,SUB)
2089 return tmp-right;
2090 }
2091
2092 //
2093 // NOTE: It is essential to specify the namespace this operator belongs to
2094 Data
2095 escript::operator*(const boost::python::object& left, const Data& right)
2096 {
2097 Data tmp(left,right.getFunctionSpace(),false);
2098 MAKELAZYBIN2(tmp,right,MUL)
2099 return tmp*right;
2100 }
2101
2102 //
2103 // NOTE: It is essential to specify the namespace this operator belongs to
2104 Data
2105 escript::operator/(const boost::python::object& left, const Data& right)
2106 {
2107 Data tmp(left,right.getFunctionSpace(),false);
2108 MAKELAZYBIN2(tmp,right,DIV)
2109 return tmp/right;
2110 }
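// All of the object/Data operator overloads above wrap the Python operand in
// a temporary Data on the other operand's function space and then reuse the
// Data/Data operators (or the MAKELAZYBIN2 shortcut), so mixed expressions
// compose naturally.  A sketch:
#if 0
Data affine_example(const Data& x,
                    const boost::python::object& a,
                    const boost::python::object& b)
{
    return a*x + b;        // object*Data, then Data+object
}
#endif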
2111
2112
2113 /* TODO */
2114 /* global reduction */
2115 Data
2116 Data::getItem(const boost::python::object& key) const
2117 {
2118
2119 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2120
2121 if (slice_region.size()!=getDataPointRank()) {
2122 throw DataException("Error - slice size does not match Data rank.");
2123 }
2124
2125 return getSlice(slice_region);
2126 }
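// getItem converts a Python subscript (an index, a slice, or a tuple of them)
// into a DataTypes::RegionType and hands it to getSlice.  Sketch of the
// C++-side equivalent, assuming RegionType is a vector of (begin,end) index
// pairs as produced by getSliceRegion:
#if 0
Data first_column_example(const Data& d)    // d has rank 2
{
    DataTypes::RegionType region;
    region.push_back(std::pair<int,int>(0, d.getDataPointShape()[0])); // all rows
    region.push_back(std::pair<int,int>(0, 1));                        // column 0
    return d.getSlice(region);
}
#endif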
2127
2128 /* TODO */
2129 /* global reduction */
2130 Data
2131 Data::getSlice(const DataTypes::RegionType& region) const
2132 {
2133 return Data(*this,region);
2134 }
2135
2136 /* TODO */
2137 /* global reduction */
2138 void
2139 Data::setItemO(const boost::python::object& key,
2140 const boost::python::object& value)
2141 {
2142 Data tempData(value,getFunctionSpace());
2143 setItemD(key,tempData);
2144 }
2145
2146 void
2147 Data::setItemD(const boost::python::object& key,
2148 const Data& value)
2149 {
2150 // const DataArrayView& view=getPointDataView();
2151
2152 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2153 if (slice_region.size()!=getDataPointRank()) {
2154 throw DataException("Error - slice size does not match Data rank.");
2155 }
2156 if (getFunctionSpace()!=value.getFunctionSpace()) {
2157 setSlice(Data(value,getFunctionSpace()),slice_region);
2158 } else {
2159 setSlice(value,slice_region);
2160 }
2161 }
2162
2163 void
2164 Data::setSlice(const Data& value,
2165 const DataTypes::RegionType& region)
2166 {
2167 if (isProtected()) {
2168 throw DataException("Error - attempt to update protected Data object.");
2169 }
2170 FORCERESOLVE;
2171 /* if (isLazy())
2172 {
2173 throw DataException("Error - setSlice not permitted on lazy data.");
2174 }*/
2175 Data tempValue(value);
2176 typeMatchLeft(tempValue);
2177 typeMatchRight(tempValue);
2178 getReady()->setSlice(tempValue.m_data.get(),region);
2179 }
2180
2181 void
2182 Data::typeMatchLeft(Data& right) const
2183 {
2184 if (right.isLazy() && !isLazy())
2185 {
2186 right.resolve();
2187 }
2188 if (isExpanded()){
2189 right.expand();
2190 } else if (isTagged()) {
2191 if (right.isConstant()) {
2192 right.tag();
2193 }
2194 }
2195 }
2196
2197 void
2198 Data::typeMatchRight(const Data& right)
2199 {
2200 if (isLazy() && !right.isLazy())
2201 {
2202 resolve();
2203 }
2204 if (isTagged()) {
2205 if (right.isExpanded()) {
2206 expand();
2207 }
2208 } else if (isConstant()) {
2209 if (right.isExpanded()) {
2210 expand();
2211 } else if (right.isTagged()) {
2212 tag();
2213 }
2214 }
2215 }
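// Taken together, typeMatchLeft and typeMatchRight promote the "smaller"
// representation so that both operands agree before data is copied:
//
//     constant + tagged    -> tagged
//     constant + expanded  -> expanded
//     tagged   + expanded  -> expanded
//
// and a lazy operand facing a non-lazy one is resolved first.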
2216
2217 void
2218 Data::setTaggedValueByName(std::string name,
2219 const boost::python::object& value)
2220 {
2221 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2222 FORCERESOLVE;
2223 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2224 setTaggedValue(tagKey,value);
2225 }
2226 }
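// Note that setTaggedValueByName is a silent no-op when the name is not a
// registered tag of the domain.  Usage sketch (the tag name is a placeholder):
#if 0
void tag_example(Data& d, const boost::python::object& v)
{
    d.setTaggedValueByName("interior", v);   // applied only if "interior" is a valid tag
}
#endif
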
2227 void
2228 Data::setTaggedValue(int tagKey,
2229 const boost::python::object& value)
2230 {
2231 if (isProtected()) {
2232 throw DataException("Error - attempt to update protected Data object.");
2233 }
2234 //
2235 // Ensure underlying data object is of type DataTagged
2236 FORCERESOLVE;
2237 if (isConstant()) tag();
2238 numeric::array asNumArray(value);
2239
2240 // extract the shape of the numarray
2241 DataTypes::ShapeType tempShape;
2242 for (int i=0; i < asNumArray.getrank(); i++) {
2243 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2244 }
2245
2246 DataVector temp_data2;
2247 temp_data2.copyFromNumArray(asNumArray,1);
2248
2249 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2250 }
2251
2252
2253 void
2254 Data::setTaggedValueFromCPP(int tagKey,
2255 const DataTypes::ShapeType& pointshape,
2256 const DataTypes::ValueType& value,
2257 int dataOffset)
2258 {
2259 if (isProtected()) {
2260 throw DataException("Error - attempt to update protected Data object.");
2261 }
2262 //
2263 // Ensure underlying data object is of type DataTagged
2264 FORCERESOLVE;
2265 if (isConstant()) tag();
2266 //
2267 // Call DataAbstract::setTaggedValue
2268 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2269 }
2270
2271 int
2272 Data::getTagNumber(int dpno)
2273 {
2274 if (isEmpty())
2275 {
2276 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2277 }
2278 return getFunctionSpace().getTagFromDataPointNo(dpno);
2279 }
2280
2281
2282 ostream& escript::operator<<(ostream& o, const Data& data)
2283 {
2284 o << data.toString();
2285 return o;
2286 }
2287
2288 Data
2289 escript::C_GeneralTensorProduct(Data& arg_0,
2290 Data& arg_1,
2291 int axis_offset,
2292 int transpose)
2293 {
2294 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2295 // SM is the product of the last axis_offset entries in arg_0.getShape().
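// Worked example (transpose==0): arg_0 shape (2,3), arg_1 shape (3,4),
// axis_offset==1  =>  SL=2, SM=3, SR=4 and the result has shape (2,4);
// the shared SM axes are summed away exactly as in a matrix-matrix product.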
2296
2297 // deal with any lazy data
2298 // if (arg_0.isLazy()) {arg_0.resolve();}
2299 // if (arg_1.isLazy()) {arg_1.resolve();}
2300 if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
2301 {
2302 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2303 return Data(c);
2304 }
2305
2306 // Interpolate if necessary and find an appropriate function space
2307 Data arg_0_Z, arg_1_Z;
2308 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2309 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2310 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2311 arg_1_Z = Data(arg_1);
2312 }
2313 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2314 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2315 arg_0_Z =Data(arg_0);
2316 }
2317 else {
2318 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2319 }
2320 } else {
2321 arg_0_Z = Data(arg_0);
2322 arg_1_Z = Data(arg_1);
2323 }
2324 // Get rank and shape of inputs
2325 int rank0 = arg_0_Z.getDataPointRank();
2326 int rank1 = arg_1_Z.getDataPointRank();
2327 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2328 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2329
2330 // Prepare for the loops of the product and verify compatibility of shapes
2331 int start0=0, start1=0;
2332 if (transpose == 0) {}
2333 else if (transpose == 1) { start0 = axis_offset; }
2334 else if (transpose == 2) { start1 = rank1-axis_offset; }
2335 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2336
2337
2338 // Adjust the shapes for transpose
2339 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2340 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2341 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2342 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
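// e.g. transpose==1, axis_offset==1: a shape (3,2) for arg_0 becomes
// tmpShape0 (2,3), i.e. the axes to be summed are rotated to the back of
// arg_0 (and, for transpose==2, to the front of arg_1).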
2343
2344 #if 0
2345 // For debugging: show shape after transpose
2346 char tmp[100];
2347 std::string shapeStr;
2348 shapeStr = "(";
2349 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2350 shapeStr += ")";
2351 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2352 shapeStr = "(";
2353 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2354 shapeStr += ")";
2355 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2356 #endif
2357
2358 // Prepare for the loops of the product
2359 int SL=1, SM=1, SR=1;
2360 for (int i=0; i<rank0-axis_offset; i++) {
2361 SL *= tmpShape0[i];
2362 }
2363 for (int i=rank0-axis_offset; i<rank0; i++) {
2364 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2365 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2366 }
2367 SM *= tmpShape0[i];
2368 }
2369 for (int i=axis_offset; i<rank1; i++) {
2370 SR *= tmpShape1[i];
2371 }
2372
2373 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2374 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2375 { // block to limit the scope of out_index
2376 int out_index=0;
2377 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2378 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2379 }
2380
2381 if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
2382 {
2383 ostringstream os;
2384 os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
2385 throw DataException(os.str());
2386 }
2387
2388 // Declare output Data object
2389 Data res;
2390
2391 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2392 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2393 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2394 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2395 double *ptr_2 = &(res.getDataAtOffset(0));
2396 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2397 }
2398 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2399
2400 // Prepare the DataConstant input
2401 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2402 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2403
2404 // Borrow DataTagged input from Data object
2405 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2406 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2407
2408 // Prepare a DataTagged output (operand 2)
2409 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2410 res.tag();
2411 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2412 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2413
2414 // Prepare offset into DataConstant
2415 int offset_0 = tmp_0->getPointOffset(0,0);
2416 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2417 // Get the views
2418 // DataArrayView view_1 = tmp_1->getDefaultValue();
2419 // DataArrayView view_2 = tmp_2->getDefaultValue();
2420 // // Get the pointers to the actual data
2421 // double *ptr_1 = &((view_1.getData())[0]);
2422 // double *ptr_2 = &((view_2.getData())[0]);
2423
2424 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2425 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2426
2427
2428 // Compute an MVP for the default
2429 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2430 // Compute an MVP for each tag
2431 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2432 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2433 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2434 tmp_2->addTag(i->first);
2435 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2436 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2437 // double *ptr_1 = &view_1.getData(0);
2438 // double *ptr_2 = &view_2.getData(0);
2439
2440 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2441 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2442
2443 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2444 }
2445
2446 }
2447 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2448
2449 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2450 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2451 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2452 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2453 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2454 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2455 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2456 int sampleNo_1,dataPointNo_1;
2457 int numSamples_1 = arg_1_Z.getNumSamples();
2458 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2459 int offset_0 = tmp_0->getPointOffset(0,0);
2460 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2461 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2462 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2463 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2464 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2465 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2466 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2467 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2468 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2469 }
2470 }
2471
2472 }
2473 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2474
2475 // Borrow DataTagged input from Data object
2476 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2477 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2478
2479 // Prepare the DataConstant input
2480 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2481 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2482
2483 // Prepare a DataTagged output (operand 2)
2484 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2485 res.tag();
2486 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2487 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2488
2489 // Prepare offset into DataConstant
2490 int offset_1 = tmp_1->getPointOffset(0,0);
2491 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2492 // Get the views
2493 // DataArrayView view_0 = tmp_0->getDefaultValue();
2494 // DataArrayView view_2 = tmp_2->getDefaultValue();
2495 // // Get the pointers to the actual data
2496 // double *ptr_0 = &((view_0.getData())[0]);
2497 // double *ptr_2 = &((view_2.getData())[0]);
2498
2499 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2500 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2501
2502 // Compute an MVP for the default
2503 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2504 // Compute an MVP for each tag
2505 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2506 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2507 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2508 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2509 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2510 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2511 // double *ptr_0 = &view_0.getData(0);
2512 // double *ptr_2 = &view_2.getData(0);
2513
2514 tmp_2->addTag(i->first);
2515 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2516 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2517 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2518 }
2519
2520 }
2521 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2522
2523 // Borrow DataTagged input from Data object
2524 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2525 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2526
2527 // Borrow DataTagged input from Data object
2528 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2529 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2530
2531 // Prepare a DataTagged output (operand 2)
2532 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2533 res.tag(); // DataTagged output
2534 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2535 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2536
2537 // // Get the views
2538 // DataArrayView view_0 = tmp_0->getDefaultValue();
2539 // DataArrayView view_1 = tmp_1->getDefaultValue();
2540 // DataArrayView view_2 = tmp_2->getDefaultValue();
2541 // // Get the pointers to the actual data
2542 // double *ptr_0 = &((view_0.getData())[0]);
2543 // double *ptr_1 = &((view_1.getData())[0]);
2544 // double *ptr_2 = &((view_2.getData())[0]);
2545
2546 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2547 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2548 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2549
2550
2551 // Compute an MVP for the default
2552 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2553 // Merge the tags
2554 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2555 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2556 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2557 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2558 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2559 }
2560 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2561 tmp_2->addTag(i->first);
2562 }
2563 // Compute an MVP for each tag
2564 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2565 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2566 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2567 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2568 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2569 // double *ptr_0 = &view_0.getData(0);
2570 // double *ptr_1 = &view_1.getData(0);
2571 // double *ptr_2 = &view_2.getData(0);
2572
2573 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2574 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2575 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2576
2577 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2578 }
2579
2580 }
2581 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2582
2583 // After matching function spaces above, the two inputs have the same numSamples and numDataPointsPerSample
2584 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2585 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2586 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2587 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2588 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2589 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2590 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2591 int sampleNo_0,dataPointNo_0;
2592 int numSamples_0 = arg_0_Z.getNumSamples();
2593 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2594 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2595 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2596 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2597 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2598 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2599 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2600 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2601 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2602 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2603 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2604 }
2605 }
2606
2607 }
2608 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2609
2610 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2611 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2612 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2613 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2614 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2615 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2616 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2617 int sampleNo_0,dataPointNo_0;
2618 int numSamples_0 = arg_0_Z.getNumSamples();
2619 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2620 int offset_1 = tmp_1->getPointOffset(0,0);
2621 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2622 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2623 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2624 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2625 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2626 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2627 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2628 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2629 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2630 }
2631 }
2632
2633
2634 }
2635 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2636
2637 // After matching function spaces above, the two inputs have the same numSamples and numDataPointsPerSample
2638 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2639 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2640 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2641 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2642 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2643 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2644 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2645 int sampleNo_0,dataPointNo_0;
2646 int numSamples_0 = arg_0_Z.getNumSamples();
2647 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2648 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2649 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2650 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2651 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2652 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2653 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2654 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2655 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2656 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2657 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2658 }
2659 }
2660
2661 }
2662 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2663
2664 // After matching function spaces above, the two inputs have the same numSamples and numDataPointsPerSample
2665 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2666 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2667 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2668 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2669 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2670 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2671 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2672 int sampleNo_0,dataPointNo_0;
2673 int numSamples_0 = arg_0_Z.getNumSamples();
2674 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2675 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2676 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2677 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2678 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2679 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2680 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2681 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2682 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2683 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2684 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2685 }
2686 }
2687
2688 }
2689 else {
2690 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2691 }
2692
2693 return res;
2694 }
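// Usage sketch, matching the worked example at the top of the function:
// contract the last axis of A (shape (2,3)) with the first axis of B
// (shape (3,4)) to get a (2,4) result.
#if 0
Data matmul_example(Data& A, Data& B)
{
    return C_GeneralTensorProduct(A, B, /*axis_offset*/1, /*transpose*/0);
}
#endif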
2695
2696 DataAbstract*
2697 Data::borrowData() const
2698 {
2699 return m_data.get();
2700 }
2701
2702 // Note: this returns a non-const pointer from a const member function.
2703 DataAbstract_ptr
2704 Data::borrowDataPtr() const
2705 {
2706 return m_data;
2707 }
2708
2709 // Note: this returns a non-const pointer from a const member function.
2710 DataReady_ptr
2711 Data::borrowReadyPtr() const
2712 {
2713 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2714 EsysAssert((dr!=0), "Error - casting to DataReady.");
2715 return dr;
2716 }
2717
2718 std::string
2719 Data::toString() const
2720 {
2721 if (!m_data->isEmpty() &&
2722 !m_data->isLazy() &&
2723 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2724 {
2725 stringstream temp;
2726 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2727 return temp.str();
2728 }
2729 return m_data->toString();
2730 }
2731
2732
2733
2734 DataTypes::ValueType::const_reference
2735 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2736 {
2737 if (isLazy())
2738 {
2739 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2740 }
2741 return getReady()->getDataAtOffset(i);
2742 }
2743
2744
2745 DataTypes::ValueType::reference
2746 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2747 {
2748 // if (isLazy())
2749 // {
2750 // throw DataException("getDataAtOffset not permitted on lazy data.");
2751 // }
2752 FORCERESOLVE;
2753 return getReady()->getDataAtOffset(i);
2754 }
2755
2756 DataTypes::ValueType::const_reference
2757 Data::getDataPoint(int sampleNo, int dataPointNo) const
2758 {
2759 if (!isReady())
2760 {
2761 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2762 }
2763 else
2764 {
2765 const DataReady* dr=getReady();
2766 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2767 }
2768 }
2769
2770
2771 DataTypes::ValueType::reference
2772 Data::getDataPoint(int sampleNo, int dataPointNo)
2773 {
2774 FORCERESOLVE;
2775 if (!isReady())
2776 {
2777 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2778 }
2779 else
2780 {
2781 DataReady* dr=getReady();
2782 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2783 }
2784 }
2785
2786
2787 /* Member functions specific to the MPI implementation */
2788
2789 void
2790 Data::print()
2791 {
2792 int i,j;
2793
2794 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2795 for( i=0; i<getNumSamples(); i++ )
2796 {
2797 printf( "[%6d]", i );
2798 for( j=0; j<getNumDataPointsPerSample(); j++ )
2799 printf( "\t%10.7g", (getSampleData(i))[j] );
2800 printf( "\n" );
2801 }
2802 }
2803 void
2804 Data::dump(const std::string fileName) const
2805 {
2806 try
2807 {
2808 if (isLazy())
2809 {
2810 Data temp(*this); // this is to get a non-const object which we can resolve
2811 temp.resolve();
2812 temp.dump(fileName);
2813 }
2814 else
2815 {
2816 m_data->dump(fileName);
2817 }
2818 }
2819 catch (std::exception& e)
2820 {
2821 cout << e.what() << endl;
2822 }
2823 }
2824
2825 int
2826 Data::get_MPISize() const
2827 {
2828 int size;
2829 #ifdef PASO_MPI
2830 int error;
2831 error = MPI_Comm_size( get_MPIComm(), &size );
2832 #else
2833 size = 1;
2834 #endif
2835 return size;
2836 }
2837
2838 int
2839 Data::get_MPIRank() const
2840 {
2841 int rank;
2842 #ifdef PASO_MPI
2843 int error;
2844 error = MPI_Comm_rank( get_MPIComm(), &rank );
2845 #else
2846 rank = 0;
2847 #endif
2848 return rank;
2849 }
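// Without PASO_MPI both queries degrade gracefully (size 1, rank 0), so
// rank-guarded code works in serial and parallel builds alike.  Sketch:
#if 0
void report(const Data& d)
{
    if (d.get_MPIRank() == 0) {            // only the master process prints
        std::cout << d.toString() << std::endl;
    }
}
#endif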
2850
2851 MPI_Comm
2852 Data::get_MPIComm() const
2853 {
2854 #ifdef PASO_MPI
2855 return MPI_COMM_WORLD;
2856 #else
2857 return -1;
2858 #endif
2859 }
2860
2861

