/[escript]/branches/arrexp_2137_win_merge/escript/src/Data.cpp
ViewVC logotype

Contents of /branches/arrexp_2137_win_merge/escript/src/Data.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2225 - (show annotations)
Wed Jan 21 05:30:12 2009 UTC (13 years, 5 months ago) by jfenwick
File size: 75729 byte(s)
And again.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #ifdef _OPENMP
33 #include <omp.h>
34 #endif
35 #include <fstream>
36 #include <algorithm>
37 #include <vector>
38 #include <functional>
39 #include <sstream> // so we can throw messages about ranks
40
41 #include <boost/python/dict.hpp>
42 #include <boost/python/extract.hpp>
43 #include <boost/python/long.hpp>
44 #include "WrappedArray.h"
45
46 using namespace std;
47 using namespace boost::python;
48 using namespace boost;
49 using namespace escript;
50
// Ensure the current object is not a DataLazy (force evaluation of any
// deferred operations before code that needs concrete data runs).
// The idea was that we could add an optional warning whenever a resolve is forced
#define FORCERESOLVE if (isLazy()) {resolve();}

// True when the AUTOLAZY feature (automatically deferring operations on
// expanded data) is switched on in the global escript parameters.
#define AUTOLAZYON escriptParams.getAUTOLAZY()

// If this object is lazy (or AUTOLAZY applies), return a new Data wrapping
// a deferred unary operation X instead of computing the result now.
#define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
	DataLazy* c=new DataLazy(borrowDataPtr(),X);\
	return Data(c);\
}

// As MAKELAZYOP but for operations taking an extra parameter Y (eg a tolerance).
#define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
	DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
	return Data(c);\
}

// Deferred binary operation X with operand R, updating *this in place
// (this object's data is replaced by the lazy node).
#define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
	/* m_data=c->getPtr();*/ set_m_data(c->getPtr());\
	return (*this);\
}

// like the above but returns a new data rather than *this
#define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
	return Data(c);\
}

// Deferred binary operation X on two explicit operands L and R.
#define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
	return Data(c);\
}

// Do not use the following unless you want to make copies on assignment rather than
// share data.  CopyOnWrite should make this unnescessary.
// #define ASSIGNMENT_MEANS_DEEPCOPY
89
namespace
{

// Convert one data point (rank 0..4) stored in v, laid out according to
// shape, into a nested boost::python tuple (rank 2 -> tuple of tuples, etc).
// ARR only needs operator[]; callers pass raw double pointers as well as
// vector-like containers.  Throws DataException for ranks above 4.
// This should be safer once the DataC RO changes have been brought in
template <class ARR>
boost::python::tuple
pointToTuple( const DataTypes::ShapeType& shape,ARR v)
{
   using namespace boost::python;
   using boost::python::list;
   int rank=shape.size();
   if (rank==0)
   {
	// scalar point: a 1-tuple holding the single value
	return make_tuple(v[0]);
   }
   else if (rank==1)
   {
	list l;
	for (size_t i=0;i<shape[0];++i)
	{
	   l.append(v[i]);
	}
	return tuple(l);
   }
   else if (rank==2)
   {
	list lj;
	for (size_t j=0;j<shape[0];++j)
	{
	   list li;
	   for (size_t i=0;i<shape[1];++i)
	   {
	      // getRelIndex maps the multi-index to the flat offset within the point
	      li.append(v[DataTypes::getRelIndex(shape,j,i)]);
	   }
	   lj.append(tuple(li));
	}
	return tuple(lj);
   }
   else if (rank==3)
   {
	list lk;
	for (size_t k=0;k<shape[0];++k)
	{
	   list lj;
	   for (size_t j=0;j<shape[1];++j)
	   {
	      list li;
	      for (size_t i=0;i<shape[2];++i)
	      {
		 li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
	      }
	      lj.append(tuple(li));
	   }
	   lk.append(tuple(lj));
	}
	return tuple(lk);
   }
   else if (rank==4)
   {
	list ll;
	for (size_t l=0;l<shape[0];++l)
	{
	   list lk;
	   for (size_t k=0;k<shape[1];++k)
	   {
	      list lj;
	      for (size_t j=0;j<shape[2];++j)
	      {
		 list li;
		 for (size_t i=0;i<shape[3];++i)
		 {
		    li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
		 }
		 lj.append(tuple(li));
	      }
	      lk.append(tuple(lj));
	   }
	   ll.append(tuple(lk));
	}
	return tuple(ll);
   }
   else
	throw DataException("Unknown rank in pointToTuple.");
}

}	// anonymous namespace
176
177 Data::Data()
178 : m_shared(false), m_lazy(false)
179 {
180 //
181 // Default data is type DataEmpty
182 DataAbstract* temp=new DataEmpty();
183 // m_data=temp->getPtr();
184 set_m_data(temp->getPtr());
185 m_protected=false;
186 }
187
188 Data::Data(double value,
189 const tuple& shape,
190 const FunctionSpace& what,
191 bool expanded)
192 : m_shared(false), m_lazy(false)
193 {
194 DataTypes::ShapeType dataPointShape;
195 for (int i = 0; i < shape.attr("__len__")(); ++i) {
196 dataPointShape.push_back(extract<const int>(shape[i]));
197 }
198
199 int len = DataTypes::noValues(dataPointShape);
200 DataVector temp_data(len,value,len);
201 initialise(temp_data, dataPointShape, what, expanded);
202 m_protected=false;
203 }
204
205 Data::Data(double value,
206 const DataTypes::ShapeType& dataPointShape,
207 const FunctionSpace& what,
208 bool expanded)
209 : m_shared(false), m_lazy(false)
210 {
211 int len = DataTypes::noValues(dataPointShape);
212
213 DataVector temp_data(len,value,len);
214 // DataArrayView temp_dataView(temp_data, dataPointShape);
215
216 // initialise(temp_dataView, what, expanded);
217 initialise(temp_data, dataPointShape, what, expanded);
218
219 m_protected=false;
220 }
221
222 Data::Data(const Data& inData)
223 : m_shared(false), m_lazy(false)
224 {
225 // m_data=inData.m_data;
226 set_m_data(inData.m_data);
227 m_protected=inData.isProtected();
228 }
229
230
231 Data::Data(const Data& inData,
232 const DataTypes::RegionType& region)
233 : m_shared(false), m_lazy(false)
234 {
235 DataAbstract_ptr dat=inData.m_data;
236 if (inData.isLazy())
237 {
238 dat=inData.m_data->resolve();
239 }
240 else
241 {
242 dat=inData.m_data;
243 }
244 //
245 // Create Data which is a slice of another Data
246 DataAbstract* tmp = dat->getSlice(region);
247 // m_data=DataAbstract_ptr(tmp);
248 set_m_data(DataAbstract_ptr(tmp));
249 m_protected=false;
250
251 }
252
// Construct a copy of inData interpolated onto the given FunctionSpace.
Data::Data(const Data& inData,
	   const FunctionSpace& functionspace)
	: m_shared(false), m_lazy(false)
{
  if (inData.isEmpty())
  {
    throw DataException("Error - will not interpolate for instances of DataEmpty.");
  }
  if (inData.getFunctionSpace()==functionspace) {
    // Same function space: just share the underlying data, no interpolation needed.
    set_m_data(inData.m_data);
  }
  else
  {

    if (inData.isConstant()) {	// for a constant function, we just need to use the new function space
      if (!inData.probeInterpolation(functionspace))
      {           // Even though this is constant, we still need to check whether interpolation is allowed
	throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
      }
      // if the data is not lazy, this will just be a cast to DataReady
      DataReady_ptr dr=inData.m_data->resolve();
      DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
      set_m_data(DataAbstract_ptr(dc));
    } else {
      // General case: build an expanded zero Data on the target space and
      // ask the source domain to fill it by interpolation.
      Data tmp(0,inData.getDataPointShape(),functionspace,true);
      // Note: Must use a reference or pointer to a derived object
      // in order to get polymorphic behaviour. Shouldn't really
      // be able to create an instance of AbstractDomain but that was done
      // as a boost:python work around which may no longer be required.
      /*const AbstractDomain& inDataDomain=inData.getDomain();*/
      const_Domain_ptr inDataDomain=inData.getDomain();
      if  (inDataDomain==functionspace.getDomain()) {
	inDataDomain->interpolateOnDomain(tmp,inData);
      } else {
	// Target lives on a different domain: cross-domain interpolation.
	inDataDomain->interpolateACross(tmp,inData);
      }
      set_m_data(tmp.m_data);
    }
  }
  m_protected=false;
}
297
298 Data::Data(DataAbstract* underlyingdata)
299 : m_shared(false), m_lazy(false)
300 {
301 // m_data=shared_ptr<DataAbstract>(underlyingdata);
302 m_data=underlyingdata->getPtr();
303 m_protected=false;
304 }
305
// Adopt an already-shared DataAbstract; set_m_data() registers this object
// as an owner and syncs the m_lazy flag.
Data::Data(DataAbstract_ptr underlyingdata)
	: m_shared(false), m_lazy(false)
{
	set_m_data(underlyingdata);
	m_protected=false;
}
313
// Construct directly from a raw value vector plus shape, on the given
// FunctionSpace, optionally expanded.
Data::Data(const DataTypes::ValueType& value,
		 const DataTypes::ShapeType& shape,
               const FunctionSpace& what,
               bool expanded)
	: m_shared(false), m_lazy(false)
{
   initialise(value,shape,what,expanded);
   m_protected=false;
}
323
324
325 Data::Data(const object& value,
326 const FunctionSpace& what,
327 bool expanded)
328 : m_shared(false), m_lazy(false)
329 {
330 //numeric::array asNumArray(value);
331 WrappedArray w(value);
332 initialise(w,what,expanded);
333 m_protected=false;
334 }
335
336
// Construct constant data on other's FunctionSpace from a python object.
// A scalar input is broadcast across other's data point shape.
Data::Data(const object& value,
	   const Data& other)
	: m_shared(false), m_lazy(false)
{
  WrappedArray w(value);

  // extract the shape of the array
  const DataTypes::ShapeType& tempShape=w.getShape();
  if (w.getRank()==0) {

     // Scalar: read the single value out of the wrapped array...
     // get the space for the data vector
     int len1 = DataTypes::noValues(tempShape);
     DataVector temp_data(len1, 0.0, len1);
     temp_data.copyFromArray(w,1);

     int len = DataTypes::noValues(other.getDataPointShape());

     // ...and replicate it over every position of other's point shape.
     DataVector temp2_data(len, temp_data[0], len);
     DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
     set_m_data(DataAbstract_ptr(t));

  } else {
     //
     // Create a DataConstant with the same sample shape as other
     DataConstant* t=new DataConstant(w,other.getFunctionSpace());
     set_m_data(DataAbstract_ptr(t));
  }
  m_protected=false;
}
369
Data::~Data()
{
  // Passing a null pointer deregisters this object as an owner of the
  // underlying data (see set_m_data) before the members are destroyed.
  set_m_data(DataAbstract_ptr());
}
374
375
// Central point for replacing the underlying data pointer.  Keeps the
// owner registration (used for copy-on-write sharing) and the cached
// m_lazy flag consistent with m_data.
// only call in thread safe contexts.
// This method should be atomic
void Data::set_m_data(DataAbstract_ptr p)
{
  if (m_data.get()!=0)	// release old ownership
  {
	m_data->removeOwner(this);
  }
  // after the swap we are (at least initially) the sole owner again
  m_shared=false;
  if (p.get()!=0)
  {
	m_data=p;
	m_data->addOwner(this);
	m_lazy=m_data->isLazy();
  }
  // NOTE(review): when p is null (destructor path) m_data keeps its old
  // value and m_lazy is not reset -- presumably fine because the object is
  // about to die; confirm before relying on set_m_data(null) elsewhere.
}
392
393 void Data::initialise(const WrappedArray& value,
394 const FunctionSpace& what,
395 bool expanded)
396 {
397 //
398 // Construct a Data object of the appropriate type.
399 // Construct the object first as there seems to be a bug which causes
400 // undefined behaviour if an exception is thrown during construction
401 // within the shared_ptr constructor.
402 if (expanded) {
403 DataAbstract* temp=new DataExpanded(value, what);
404 // m_data=temp->getPtr();
405 set_m_data(temp->getPtr());
406 } else {
407 DataAbstract* temp=new DataConstant(value, what);
408 // m_data=temp->getPtr();
409 set_m_data(temp->getPtr());
410 }
411 }
412
413
414 void
415 Data::initialise(const DataTypes::ValueType& value,
416 const DataTypes::ShapeType& shape,
417 const FunctionSpace& what,
418 bool expanded)
419 {
420 //
421 // Construct a Data object of the appropriate type.
422 // Construct the object first as there seems to be a bug which causes
423 // undefined behaviour if an exception is thrown during construction
424 // within the shared_ptr constructor.
425 if (expanded) {
426 DataAbstract* temp=new DataExpanded(what, shape, value);
427 // m_data=temp->getPtr();
428 set_m_data(temp->getPtr());
429 } else {
430 DataAbstract* temp=new DataConstant(what, shape, value);
431 // m_data=temp->getPtr();
432 set_m_data(temp->getPtr());
433 }
434 }
435
436
// Fill an escriptDataC struct pointing at this object so C code can refer
// back to it.
escriptDataC
Data::getDataC()
{
    escriptDataC temp;
    temp.m_dataPtr=(void*)this;
    return temp;
}

// Const variant.  NOTE(review): constness is cast away when the pointer is
// stored -- C callers must treat the referenced data as read-only.
escriptDataC
Data::getDataC() const
{
    escriptDataC temp;
    temp.m_dataPtr=(void*)this;
    return temp;
}
452
// Size (in values) of the buffer needed to evaluate one sample; delegates
// to the underlying data representation.
size_t
Data::getSampleBufferSize() const
{
  return m_data->getSampleBufferSize();
}
458
459
460 const boost::python::tuple
461 Data::getShapeTuple() const
462 {
463 const DataTypes::ShapeType& shape=getDataPointShape();
464 switch(getDataPointRank()) {
465 case 0:
466 return make_tuple();
467 case 1:
468 return make_tuple(long_(shape[0]));
469 case 2:
470 return make_tuple(long_(shape[0]),long_(shape[1]));
471 case 3:
472 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
473 case 4:
474 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
475 default:
476 throw DataException("Error - illegal Data rank.");
477 }
478 }
479
480
481 // The different name is needed because boost has trouble with overloaded functions.
482 // It can't work out what type the function is based soley on its name.
483 // There are ways to fix this involving creating function pointer variables for each form
484 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
485 Data
486 Data::copySelf()
487 {
488 DataAbstract* temp=m_data->deepCopy();
489 return Data(temp);
490 }
491
492 void
493 Data::copy(const Data& other)
494 {
495 DataAbstract* temp=other.m_data->deepCopy();
496 DataAbstract_ptr p=temp->getPtr();
497 // m_data=p;
498 set_m_data(p);
499 }
500
501
502 Data
503 Data::delay()
504 {
505 DataLazy* dl=new DataLazy(m_data);
506 return Data(dl);
507 }
508
509 void
510 Data::delaySelf()
511 {
512 if (!isLazy())
513 {
514 // m_data=(new DataLazy(m_data))->getPtr();
515 set_m_data((new DataLazy(m_data))->getPtr());
516 }
517 }
518
// Zero every value of this object in place.
void
Data::setToZero()
{
  if (isEmpty())
  {
     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  // make sure we are not mutating a vector shared with other Data objects
  exclusiveWrite();
  m_data->setToZero();
}
529
// Copy values from other into this object wherever mask > 0.
void
Data::copyWithMask(const Data& other,
		   const Data& mask)
{
  // 1. Interpolate if required so all Datas use the same FS as this
  // 2. Tag or Expand so that all Data's are the same type
  // 3. Iterate over the data vectors copying values where mask is >0
  if (other.isEmpty() || mask.isEmpty())
  {
	throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
  }
  // Work on shallow copies so the caller's objects are not modified by the
  // resolve/interpolate/expand steps below.
  Data other2(other);
  Data mask2(mask);
  other2.resolve();
  mask2.resolve();
  this->resolve();
  FunctionSpace myFS=getFunctionSpace();
  FunctionSpace oFS=other2.getFunctionSpace();
  FunctionSpace mFS=mask2.getFunctionSpace();
  if (oFS!=myFS)
  {
     if (other2.probeInterpolation(myFS))
     {
	other2=other2.interpolate(myFS);
     }
     else
     {
	throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
     }
  }
  if (mFS!=myFS)
  {
     if (mask2.probeInterpolation(myFS))
     {
	mask2=mask2.interpolate(myFS);
     }
     else
     {
	throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
     }
  }
			// Ensure that all args have the same type
	if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
	{
		this->expand();
		other2.expand();
		mask2.expand();
	}
	else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
	{
		this->tag();
		other2.tag();
		mask2.tag();
	}
	else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
	{
		// all three are already constant: nothing to promote
	}
	else
	{
		throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
	}
	// do not write into storage shared with other Data objects
	exclusiveWrite();
	// Now we iterate over the elements
	DataVector& self=getReady()->getVectorRW();
	const DataVector& ovec=other2.getReady()->getVectorRO();
	const DataVector& mvec=mask2.getReady()->getVectorRO();
	if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
	{
		throw DataException("Error - size mismatch in arguments to copyWithMask.");
	}
	size_t num_points=self.size();

	// OPENMP 3.0 allows unsigned loop vars.
#if defined(_OPENMP) && (_OPENMP < 200805)
	long i;
#else
	size_t i;
#endif
	#pragma omp parallel for private(i) schedule(static)
	for (i=0;i<num_points;++i)
	{
		if (mvec[i]>0)
		{
		   self[i]=ovec[i];
		}
	}
}
617
618
619
620 bool
621 Data::isExpanded() const
622 {
623 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
624 return (temp!=0);
625 }
626
627 bool
628 Data::actsExpanded() const
629 {
630 return m_data->actsExpanded();
631
632 }
633
634
635 bool
636 Data::isTagged() const
637 {
638 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
639 return (temp!=0);
640 }
641
642 bool
643 Data::isEmpty() const
644 {
645 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
646 return (temp!=0);
647 }
648
649 bool
650 Data::isConstant() const
651 {
652 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
653 return (temp!=0);
654 }
655
656 bool
657 Data::isLazy() const
658 {
659 return m_data->isLazy();
660 }
661
662 // at the moment this is synonymous with !isLazy() but that could change
663 bool
664 Data::isReady() const
665 {
666 return (dynamic_cast<DataReady*>(m_data.get())!=0);
667 }
668
669
// Mark this object read-only; mutating operations check isProtected() and throw.
void
Data::setProtection()
{
   m_protected=true;
}

// True when the object has been marked read-only via setProtection().
bool
Data::isProtected() const
{
   return m_protected;
}
681
682
683
// Convert the underlying data to DataExpanded in place (one stored point
// per data point).  No-op if already expanded.
void
Data::expand()
{
  if (isConstant()) {
    DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataConst);
    set_m_data(temp->getPtr());
  } else if (isTagged()) {
    DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataTag);
    set_m_data(temp->getPtr());
  } else if (isExpanded()) {
    //
    // do nothing
  } else if (isEmpty()) {
    throw DataException("Error - Expansion of DataEmpty not possible.");
  } else if (isLazy()) {
    resolve();
    expand();	// resolve might not give us expanded data
  } else {
    throw DataException("Error - Expansion not implemented for this Data type.");
  }
}
709
710 void
711 Data::tag()
712 {
713 if (isConstant()) {
714 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
715 DataAbstract* temp=new DataTagged(*tempDataConst);
716 // m_data=temp->getPtr();
717 set_m_data(temp->getPtr());
718 } else if (isTagged()) {
719 // do nothing
720 } else if (isExpanded()) {
721 throw DataException("Error - Creating tag data from DataExpanded not possible.");
722 } else if (isEmpty()) {
723 throw DataException("Error - Creating tag data from DataEmpty not possible.");
724 } else if (isLazy()) {
725 DataAbstract_ptr res=m_data->resolve();
726 if (m_data->isExpanded())
727 {
728 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
729 }
730 // m_data=res;
731 set_m_data(res);
732 tag();
733 } else {
734 throw DataException("Error - Tagging not implemented for this Data type.");
735 }
736 }
737
738 void
739 Data::resolve()
740 {
741 if (isLazy())
742 {
743 // m_data=m_data->resolve();
744 set_m_data(m_data->resolve());
745 }
746 }
747
// Prepare this object for in-place modification: resolve any lazy data and
// stop sharing the underlying vector with other Data objects.
void
Data::requireWrite()
{
#ifdef _OPENMP
  if (omp_in_parallel())	// Yes this is throwing an exception inside a parallel region which is forbidden
  {				// However, programs which do this are unsafe and need to be fixed
     throw DataException("Programming error. Please do not run requireWrite() in multi-threaded sections.");
  }
#endif
  resolve();
  exclusiveWrite();
}
760
// Elementwise reciprocal 1/x (deferred as a lazy RECIP node when applicable).
Data
Data::oneOver() const
{
  MAKELAZYOP(RECIP)
  return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
}

// Elementwise predicate x > 0.
Data
Data::wherePositive() const
{
  MAKELAZYOP(GZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
}

// Elementwise predicate x < 0.
Data
Data::whereNegative() const
{
  MAKELAZYOP(LZ)
  return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
}

// Elementwise predicate x >= 0.
Data
Data::whereNonNegative() const
{
  MAKELAZYOP(GEZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
}

// Elementwise predicate x <= 0.
Data
Data::whereNonPositive() const
{
  MAKELAZYOP(LEZ)
  return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
}

// Elementwise predicate |x| <= tol.
Data
Data::whereZero(double tol) const
{
  MAKELAZYOPOFF(EZ,tol)
  return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));

}

// Elementwise predicate |x| > tol.
Data
Data::whereNonZero(double tol) const
{
  MAKELAZYOPOFF(NEZ,tol)
  return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));

}
815
// Interpolate this object onto the given FunctionSpace (delegates to the
// interpolating constructor).
Data
Data::interpolate(const FunctionSpace& functionspace) const
{
  return Data(*this,functionspace);
}

// True when interpolation from our FunctionSpace to the given one is possible.
bool
Data::probeInterpolation(const FunctionSpace& functionspace) const
{
  return getFunctionSpace().probeInterpolation(functionspace);
}
827
// Gradient of this object evaluated on the given FunctionSpace; the result
// gains a trailing dimension of size functionspace.getDim() for the spatial
// derivative directions.
Data
Data::gradOn(const FunctionSpace& functionspace) const
{
  if (isEmpty())
  {
	throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  double blocktimer_start = blocktimer_time();
  if (functionspace.getDomain()!=getDomain())
    throw DataException("Error - gradient cannot be calculated on different domains.");
  DataTypes::ShapeType grad_shape=getDataPointShape();
  grad_shape.push_back(functionspace.getDim());
  // start from an expanded zero Data and let the domain fill it in
  Data out(0.0,grad_shape,functionspace,true);
  getDomain()->setToGradient(out,*this);
  blocktimer_increment("grad()", blocktimer_start);
  return out;
}

// Gradient on the default (general) function space of our domain.
Data
Data::grad() const
{
  if (isEmpty())
  {
	throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  return gradOn(escript::function(*getDomain()));
}
855
// Number of values held in a single data point (product of shape dimensions).
int
Data::getDataPointSize() const
{
  return m_data->getNoValues();
}

// Total number of values stored by the underlying representation.
DataTypes::ValueType::size_type
Data::getLength() const
{
  return m_data->getLength();
}
867
// Expose one data point as a numarray (wraps the tuple-returning variant).
const
boost::python::numeric :: array
Data:: getValueOfDataPoint(int dataPointNo)
{
    return boost::python::numeric::array(getValueOfDataPointAsTuple(dataPointNo));
}

// Return the values of one data point as a (possibly nested) python tuple.
// dataPointNo is a flat index across all samples on this rank.
const boost::python::object
Data::getValueOfDataPointAsTuple(int dataPointNo)
{
  FORCERESOLVE
  if (getNumDataPointsPerSample()>0) {
       // translate the flat index into (sample, point-within-sample)
       int sampleNo = dataPointNo/getNumDataPointsPerSample();
       int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
       //
       // Check a valid sample number has been supplied
       if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
           throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
       }

       //
       // Check a valid data point number has been supplied
       if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
           throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
       }
       // TODO: global error handling
       DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
       return pointToTuple(getDataPointShape(),&(getDataAtOffsetRO(offset)));
  }
  else
  {
       // The old method would return an empty numArray of the given shape
       // I'm going to throw an exception because if we have zero points per sample we have problems
       throw DataException("Error - need at least 1 datapoint per sample.");
  }

}
905
906
907 void
908 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
909 {
910 // this will throw if the value cannot be represented
911 setValueOfDataPointToArray(dataPointNo,py_object);
912 }
913
914 void
915 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
916 {
917 if (isProtected()) {
918 throw DataException("Error - attempt to update protected Data object.");
919 }
920 FORCERESOLVE;
921
922 WrappedArray w(obj);
923 //
924 // check rank
925 if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
926 throw DataException("Rank of numarray does not match Data object rank");
927
928 //
929 // check shape of num_array
930 for (unsigned int i=0; i<getDataPointRank(); i++) {
931 if (w.getShape()[i]!=getDataPointShape()[i])
932 throw DataException("Shape of numarray does not match Data object rank");
933 }
934 //
935 // make sure data is expanded:
936 //
937 if (!isExpanded()) {
938 expand();
939 }
940 if (getNumDataPointsPerSample()>0) {
941 int sampleNo = dataPointNo/getNumDataPointsPerSample();
942 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
943 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
944 } else {
945 m_data->copyToDataPoint(-1, 0,w);
946 }
947 }
948
// Set every value of one data point to the given scalar; the data is
// expanded first so only that point changes.
void
Data::setValueOfDataPoint(int dataPointNo, const double value)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // make sure data is expanded:
  FORCERESOLVE;
  if (!isExpanded()) {
    expand();
  }
  if (getNumDataPointsPerSample()>0) {
       int sampleNo = dataPointNo/getNumDataPointsPerSample();
       int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
       m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
  } else {
       // zero points per sample -- presumably copyToDataPoint treats
       // sampleNo == -1 as a special case; confirm in DataAbstract.
       m_data->copyToDataPoint(-1, 0,value);
  }
}
969
970 const
971 boost::python::numeric::array
972 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
973 {
974 return boost::python::numeric::array(getValueOfGlobalDataPointAsTuple(procNo, dataPointNo));
975 }
976
977 const
978 boost::python::object
979 Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
980 {
981 // This could be lazier than it is now
982 FORCERESOLVE;
983
984 // copy datapoint into a buffer
985 // broadcast buffer to all nodes
986 // convert buffer to tuple
987 // return tuple
988
989 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
990 size_t length=DataTypes::noValues(dataPointShape);
991
992 // added for the MPI communication
993 double *tmpData = new double[length];
994
995 // updated for the MPI case
996 if( get_MPIRank()==procNo ){
997 if (getNumDataPointsPerSample()>0) {
998 int sampleNo = dataPointNo/getNumDataPointsPerSample();
999 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1000 //
1001 // Check a valid sample number has been supplied
1002 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1003 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1004 }
1005
1006 //
1007 // Check a valid data point number has been supplied
1008 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1009 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1010 }
1011 // TODO: global error handling
1012 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1013
1014 memcpy(tmpData,&(getDataAtOffsetRO(offset)),length*sizeof(double));
1015 }
1016 }
1017 #ifdef PASO_MPI
1018 // broadcast the data to all other processes
1019 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1020 #endif
1021
1022 boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
1023 delete [] tmpData;
1024 //
1025 // return the loaded array
1026 return t;
1027
1028 }
1029
1030
// Integrate over the domain and return the result as a python tuple.
// Const variant: cannot resolve in place, so lazy data is rejected.
boost::python::object
Data::integrateToTuple_const() const
{
  if (isLazy())
  {
	throw DataException("Error - cannot integrate for constant lazy data.");
  }
  return integrateWorker();
}

// Non-const variant: lazy data is expanded first.
boost::python::object
Data::integrateToTuple()
{
  if (isLazy())
  {
	expand();
  }
  return integrateWorker();

}


// As integrateToTuple_const but returns a numarray.
boost::python::object
Data::integrate_const() const
{
  if (isLazy())
  {
	throw DataException("Error - cannot integrate for constant lazy data.");
  }
  return boost::python::numeric::array(integrateWorker());
}

// As integrateToTuple but returns a numarray.
boost::python::object
Data::integrate()
{
  if (isLazy())
  {
	expand();
  }
  return boost::python::numeric::array(integrateWorker());
}
1072
1073
1074
1075 boost::python::object
1076 Data::integrateWorker() const
1077 {
1078 DataTypes::ShapeType shape = getDataPointShape();
1079 int dataPointSize = getDataPointSize();
1080
1081 //
1082 // calculate the integral values
1083 vector<double> integrals(dataPointSize);
1084 vector<double> integrals_local(dataPointSize);
1085 const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
1086 if (dom==0)
1087 {
1088 throw DataException("Can not integrate over non-continuous domains.");
1089 }
1090 #ifdef PASO_MPI
1091 dom->setToIntegrals(integrals_local,*this);
1092 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1093 double *tmp = new double[dataPointSize];
1094 double *tmp_local = new double[dataPointSize];
1095 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1096 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1097 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1098 tuple result=pointToTuple(shape,tmp);
1099 delete[] tmp;
1100 delete[] tmp_local;
1101 #else
1102 dom->setToIntegrals(integrals,*this);
1103 /* double *tmp = new double[dataPointSize];
1104 for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; }*/
1105 tuple result=pointToTuple(shape,integrals);
1106 // delete tmp;
1107 #endif
1108
1109
1110 return result;
1111 }
1112
// Elementwise trigonometric / hyperbolic functions.  Each returns a new
// Data object (or a lazy node when deferral applies) and delegates the
// actual computation to C_TensorUnaryOperation with the libm function.

Data
Data::sin() const
{
  MAKELAZYOP(SIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

Data
Data::cos() const
{
  MAKELAZYOP(COS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

Data
Data::tan() const
{
  MAKELAZYOP(TAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

Data
Data::asin() const
{
  MAKELAZYOP(ASIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

Data
Data::acos() const
{
  MAKELAZYOP(ACOS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}


Data
Data::atan() const
{
  MAKELAZYOP(ATAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

Data
Data::sinh() const
{
  MAKELAZYOP(SINH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

Data
Data::cosh() const
{
  MAKELAZYOP(COSH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

Data
Data::tanh() const
{
  MAKELAZYOP(TANH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}
1176
1177
Data
Data::erf() const
{
  // Element-wise error function. Not available on native _WIN32 builds
  // (the MSVC C runtime has no ::erf); the Intel compiler provides one.
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
  MAKELAZYOP(ERF)
  return C_TensorUnaryOperation(*this, ::erf);
#endif
}
1188
Data
Data::asinh() const
{
  // Element-wise inverse hyperbolic sine. On native _WIN32 a local substitute
  // is used because the MSVC C runtime lacks ::asinh.
  MAKELAZYOP(ASINH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::asinh);
#endif
}
1199
Data
Data::acosh() const
{
  // Element-wise inverse hyperbolic cosine (substitute used on native _WIN32).
  MAKELAZYOP(ACOSH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::acosh);
#endif
}
1210
Data
Data::atanh() const
{
  // Element-wise inverse hyperbolic tangent (substitute used on native _WIN32).
  MAKELAZYOP(ATANH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::atanh);
#endif
}
1221
Data
Data::log10() const
{
  // Element-wise base-10 logarithm (may return a lazy LOG10 node).
  MAKELAZYOP(LOG10)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}
1228
Data
Data::log() const
{
  // Element-wise natural logarithm (may return a lazy LOG node).
  MAKELAZYOP(LOG)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}
1235
Data
Data::sign() const
{
  // Element-wise sign via escript::fsign (may return a lazy SIGN node).
  MAKELAZYOP(SIGN)
  return C_TensorUnaryOperation(*this, escript::fsign);
}
1242
Data
Data::abs() const
{
  // Element-wise absolute value (may return a lazy ABS node).
  MAKELAZYOP(ABS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}
1249
Data
Data::neg() const
{
  // Element-wise negation using std::negate (may return a lazy NEG node).
  MAKELAZYOP(NEG)
  return C_TensorUnaryOperation(*this, negate<double>());
}
1256
1257 Data
1258 Data::pos() const
1259 {
1260 // not doing lazy check here is deliberate.
1261 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1262 Data result;
1263 // perform a deep copy
1264 result.copy(*this);
1265 return result;
1266 }
1267
Data
Data::exp() const
{
  // Element-wise exponential (may return a lazy EXP node).
  MAKELAZYOP(EXP)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}
1274
Data
Data::sqrt() const
{
  // Element-wise square root (may return a lazy SQRT node).
  MAKELAZYOP(SQRT)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}
1281
1282 double
1283 Data::Lsup_const() const
1284 {
1285 if (isLazy())
1286 {
1287 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1288 }
1289 return LsupWorker();
1290 }
1291
1292 double
1293 Data::Lsup()
1294 {
1295 if (isLazy())
1296 {
1297 resolve();
1298 }
1299 return LsupWorker();
1300 }
1301
1302 double
1303 Data::sup_const() const
1304 {
1305 if (isLazy())
1306 {
1307 throw DataException("Error - cannot compute sup for constant lazy data.");
1308 }
1309 return supWorker();
1310 }
1311
1312 double
1313 Data::sup()
1314 {
1315 if (isLazy())
1316 {
1317 resolve();
1318 }
1319 return supWorker();
1320 }
1321
1322 double
1323 Data::inf_const() const
1324 {
1325 if (isLazy())
1326 {
1327 throw DataException("Error - cannot compute inf for constant lazy data.");
1328 }
1329 return infWorker();
1330 }
1331
1332 double
1333 Data::inf()
1334 {
1335 if (isLazy())
1336 {
1337 resolve();
1338 }
1339 return infWorker();
1340 }
1341
double
Data::LsupWorker() const
{
  // Local reduction: largest absolute value over this rank's data points,
  // combined across ranks with MPI_MAX when built with PASO_MPI.
  double localValue;
  //
  // set the initial absolute maximum value to zero
  AbsMax abs_max_func;
  localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1359
double
Data::supWorker() const
{
  // Local reduction: maximum value over this rank's data points,
  // combined across ranks with MPI_MAX when built with PASO_MPI.
  double localValue;
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1376
double
Data::infWorker() const
{
  // Local reduction: minimum value over this rank's data points,
  // combined across ranks with MPI_MIN when built with PASO_MPI.
  double localValue;
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1393
1394 /* TODO */
1395 /* global reduction */
Data
Data::maxval() const
{
  // Per-data-point maximum (reduces each data point to a scalar).
  // Note: still a local operation -- global MPI reduction is a TODO above.
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.maxval();
  }
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
}
1410
Data
Data::minval() const
{
  // Per-data-point minimum (reduces each data point to a scalar).
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.minval();
  }
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  return dp_algorithm(fmin_func,numeric_limits<double>::max());
}
1425
1426 Data
1427 Data::swapaxes(const int axis0, const int axis1) const
1428 {
1429 if (isLazy())
1430 {
1431 Data temp(*this);
1432 temp.resolve();
1433 return temp.swapaxes(axis0,axis1);
1434 }
1435 int axis0_tmp,axis1_tmp;
1436 DataTypes::ShapeType s=getDataPointShape();
1437 DataTypes::ShapeType ev_shape;
1438 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1439 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1440 int rank=getDataPointRank();
1441 if (rank<2) {
1442 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1443 }
1444 if (axis0<0 || axis0>rank-1) {
1445 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1446 }
1447 if (axis1<0 || axis1>rank-1) {
1448 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1449 }
1450 if (axis0 == axis1) {
1451 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1452 }
1453 if (axis0 > axis1) {
1454 axis0_tmp=axis1;
1455 axis1_tmp=axis0;
1456 } else {
1457 axis0_tmp=axis0;
1458 axis1_tmp=axis1;
1459 }
1460 for (int i=0; i<rank; i++) {
1461 if (i == axis0_tmp) {
1462 ev_shape.push_back(s[axis1_tmp]);
1463 } else if (i == axis1_tmp) {
1464 ev_shape.push_back(s[axis0_tmp]);
1465 } else {
1466 ev_shape.push_back(s[i]);
1467 }
1468 }
1469 Data ev(0.,ev_shape,getFunctionSpace());
1470 ev.typeMatchRight(*this);
1471 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1472 return ev;
1473
1474 }
1475
Data
Data::symmetric() const
{
  // Symmetric part of a rank 2 (square) or rank 4 (dim0==dim2, dim1==dim3)
  // object. Shape is validated before the lazy shortcut is taken.
  // check input
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()==2) {
    if(s[0] != s[1])
      throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
  }
  else if (getDataPointRank()==4) {
    if(!(s[0] == s[2] && s[1] == s[3]))
      throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
  }
  else {
    throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
  }
  MAKELAZYOP(SYM)
  Data ev(0.,getDataPointShape(),getFunctionSpace());
  ev.typeMatchRight(*this);
  m_data->symmetric(ev.m_data.get());
  return ev;
}
1498
Data
Data::nonsymmetric() const
{
  // Antisymmetric part of a rank 2 or rank 4 object (same shape constraints
  // as symmetric()). NOTE(review): unlike symmetric(), the lazy shortcut is
  // taken before the shape checks -- confirm that is intentional.
  MAKELAZYOP(NSYM)
  // check input
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()==2) {
    if(s[0] != s[1])
      throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
    DataTypes::ShapeType ev_shape;
    ev_shape.push_back(s[0]);
    ev_shape.push_back(s[1]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->nonsymmetric(ev.m_data.get());
    return ev;
  }
  else if (getDataPointRank()==4) {
    if(!(s[0] == s[2] && s[1] == s[3]))
      throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
    DataTypes::ShapeType ev_shape;
    ev_shape.push_back(s[0]);
    ev_shape.push_back(s[1]);
    ev_shape.push_back(s[2]);
    ev_shape.push_back(s[3]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->nonsymmetric(ev.m_data.get());
    return ev;
  }
  else {
    throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
  }
}
1533
Data
Data::trace(int axis_offset) const
{
  // Trace over a pair of axes selected by axis_offset, for rank 2, 3 or 4
  // objects. May build a lazy TRACE node instead of computing now.
  MAKELAZYOPOFF(TRACE,axis_offset)
  // NOTE(review): the guard below accepts axis_offset up to rank, while the
  // message (and the branches below, which only handle offsets up to rank-2)
  // suggest the intended upper bound is rank-2 -- confirm.
  if ((axis_offset<0) || (axis_offset>getDataPointRank()))
  {
    throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
  }
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()==2) {
    // rank 2 -> scalar result (empty shape)
    DataTypes::ShapeType ev_shape;
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->trace(ev.m_data.get(), axis_offset);
    return ev;
  }
  if (getDataPointRank()==3) {
    // rank 3 -> rank 1 result; the surviving axis depends on axis_offset
    DataTypes::ShapeType ev_shape;
    if (axis_offset==0) {
      int s2=s[2];
      ev_shape.push_back(s2);
    }
    else if (axis_offset==1) {
      int s0=s[0];
      ev_shape.push_back(s0);
    }
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->trace(ev.m_data.get(), axis_offset);
    return ev;
  }
  if (getDataPointRank()==4) {
    // rank 4 -> rank 2 result; the two surviving axes depend on axis_offset
    DataTypes::ShapeType ev_shape;
    if (axis_offset==0) {
      ev_shape.push_back(s[2]);
      ev_shape.push_back(s[3]);
    }
    else if (axis_offset==1) {
      ev_shape.push_back(s[0]);
      ev_shape.push_back(s[3]);
    }
    else if (axis_offset==2) {
      ev_shape.push_back(s[0]);
      ev_shape.push_back(s[1]);
    }
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->trace(ev.m_data.get(), axis_offset);
    return ev;
  }
  else {
    throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
  }
}
1588
1589 Data
1590 Data::transpose(int axis_offset) const
1591 {
1592 MAKELAZYOPOFF(TRANS,axis_offset)
1593 DataTypes::ShapeType s=getDataPointShape();
1594 DataTypes::ShapeType ev_shape;
1595 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1596 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1597 int rank=getDataPointRank();
1598 if (axis_offset<0 || axis_offset>rank) {
1599 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1600 }
1601 for (int i=0; i<rank; i++) {
1602 int index = (axis_offset+i)%rank;
1603 ev_shape.push_back(s[index]); // Append to new shape
1604 }
1605 Data ev(0.,ev_shape,getFunctionSpace());
1606 ev.typeMatchRight(*this);
1607 m_data->transpose(ev.m_data.get(), axis_offset);
1608 return ev;
1609 }
1610
Data
Data::eigenvalues() const
{
  // Eigenvalues of a square rank 2 object; result is rank 1 of length s[0].
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.eigenvalues();
  }
  // check input
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()!=2)
    throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
  if(s[0] != s[1])
    throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
  // create return
  DataTypes::ShapeType ev_shape(1,s[0]);
  Data ev(0.,ev_shape,getFunctionSpace());
  ev.typeMatchRight(*this);
  m_data->eigenvalues(ev.m_data.get());
  return ev;
}
1633
const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
  // Eigenvalues and eigenvectors of a square rank 2 object.
  // Returns a python tuple (ev, V): ev rank 1, V rank 2 (both of size s[0]).
  // tol is passed through to the underlying solver.
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.eigenvalues_and_eigenvectors(tol);
  }
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()!=2)
    throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
  if(s[0] != s[1])
    throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
  // create return
  DataTypes::ShapeType ev_shape(1,s[0]);
  Data ev(0.,ev_shape,getFunctionSpace());
  ev.typeMatchRight(*this);
  DataTypes::ShapeType V_shape(2,s[0]);
  Data V(0.,V_shape,getFunctionSpace());
  V.typeMatchRight(*this);
  m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
  return make_tuple(boost::python::object(ev),boost::python::object(V));
}
1658
const boost::python::tuple
Data::minGlobalDataPoint() const
{
  // Returns (processor number, data point number) of the global minimum.
  // NB: calc_minGlobalDataPoint( had to be split off from minGlobalDataPoint( as boost::make_tuple causes an
  // abort (for unknown reasons) if there are openmp directives with it in the
  // surrounding function
  int DataPointNo;
  int ProcNo;
  calc_minGlobalDataPoint(ProcNo,DataPointNo);
  return make_tuple(ProcNo,DataPointNo);
}
1671
1672 void
1673 Data::calc_minGlobalDataPoint(int& ProcNo,
1674 int& DataPointNo) const
1675 {
1676 if (isLazy())
1677 {
1678 Data temp(*this); // to get around the fact that you can't resolve a const Data
1679 temp.resolve();
1680 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1681 }
1682 int i,j;
1683 int lowi=0,lowj=0;
1684 double min=numeric_limits<double>::max();
1685
1686 Data temp=minval();
1687
1688 int numSamples=temp.getNumSamples();
1689 int numDPPSample=temp.getNumDataPointsPerSample();
1690
1691 double next,local_min;
1692 int local_lowi=0,local_lowj=0;
1693
1694 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1695 {
1696 local_min=min;
1697 #pragma omp for private(i,j) schedule(static)
1698 for (i=0; i<numSamples; i++) {
1699 for (j=0; j<numDPPSample; j++) {
1700 next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
1701 if (next<local_min) {
1702 local_min=next;
1703 local_lowi=i;
1704 local_lowj=j;
1705 }
1706 }
1707 }
1708 #pragma omp critical
1709 if (local_min<min) {
1710 min=local_min;
1711 lowi=local_lowi;
1712 lowj=local_lowj;
1713 }
1714 }
1715
1716 #ifdef PASO_MPI
1717 // determine the processor on which the minimum occurs
1718 next = temp.getDataPointRO(lowi,lowj);
1719 int lowProc = 0;
1720 double *globalMins = new double[get_MPISize()+1];
1721 int error;
1722 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1723
1724 if( get_MPIRank()==0 ){
1725 next = globalMins[lowProc];
1726 for( i=1; i<get_MPISize(); i++ )
1727 if( next>globalMins[i] ){
1728 lowProc = i;
1729 next = globalMins[i];
1730 }
1731 }
1732 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1733
1734 delete [] globalMins;
1735 ProcNo = lowProc;
1736 #else
1737 ProcNo = 0;
1738 #endif
1739 DataPointNo = lowj + lowi * numDPPSample;
1740 }
1741
void
Data::saveDX(std::string fileName) const
{
  // Writes this Data to an OpenDX file by delegating to the domain's saveDX.
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    temp.saveDX(fileName);
    return;
  }
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveDX(fileName,args);
  return;
}
1761
void
Data::saveVTK(std::string fileName) const
{
  // Writes this Data to a VTK file by delegating to the domain's saveVTK.
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    temp.saveVTK(fileName);
    return;
  }
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveVTK(fileName,args);
  return;
}
1781
Data&
Data::operator+=(const Data& right)
{
  // In-place addition. May build a lazy ADD node (MAKELAZYBINSELF returns
  // early in that case); otherwise applies plus<double> element-wise.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
  exclusiveWrite(); // Since Lazy data does not modify its leaves we only need to worry here
  binaryOp(right,plus<double>());
  return (*this);
}
1793
Data&
Data::operator+=(const boost::python::object& right)
{
  // In-place addition of a python scalar/sequence: wrap it in a Data on this
  // object's FunctionSpace and delegate to operator+=(const Data&).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)+=tmp;
  return *this;
}
1804
// Hmmm, operator= makes a deep copy but the copy constructor does not?
Data&
Data::operator=(const Data& other)
{
  // Assignment shares the underlying DataAbstract (shallow); mutation safety
  // is provided later by exclusiveWrite().
#if defined ASSIGNMENT_MEANS_DEEPCOPY
  // This should not be used.
  // Just leaving this here until I have completed transition
  copy(other);
#else
  m_protected=false; // since any changes should be caught by exclusiveWrite();
  // m_data=other.m_data;
  set_m_data(other.m_data);
#endif
  return (*this);
}
1820
Data&
Data::operator-=(const Data& right)
{
  // In-place subtraction (may build a lazy SUB node and return early).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,SUB)
  exclusiveWrite();
  binaryOp(right,minus<double>());
  return (*this);
}
1832
Data&
Data::operator-=(const boost::python::object& right)
{
  // In-place subtraction of a python value; delegates to operator-=(const Data&).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)-=tmp;
  return (*this);
}
1843
Data&
Data::operator*=(const Data& right)
{
  // In-place multiplication (may build a lazy MUL node and return early).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,MUL)
  exclusiveWrite();
  binaryOp(right,multiplies<double>());
  return (*this);
}
1855
Data&
Data::operator*=(const boost::python::object& right)
{
  // In-place multiplication by a python value; delegates to operator*=(const Data&).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)*=tmp;
  return (*this);
}
1866
Data&
Data::operator/=(const Data& right)
{
  // In-place division (may build a lazy DIV node and return early).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,DIV)
  exclusiveWrite();
  binaryOp(right,divides<double>());
  return (*this);
}
1878
Data&
Data::operator/=(const boost::python::object& right)
{
  // In-place division by a python value; delegates to operator/=(const Data&).
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)/=tmp;
  return (*this);
}
1889
Data
Data::rpowO(const boost::python::object& left) const
{
  // Reflected power (python __rpow__): computes left ** this.
  Data left_d(left,*this);
  return left_d.powD(*this);
}
1896
Data
Data::powO(const boost::python::object& right) const
{
  // Power with a python exponent: wrap it in a Data and delegate to powD.
  Data tmp(right,getFunctionSpace(),false);
  return powD(tmp);
}
1903
Data
Data::powD(const Data& right) const
{
  // Element-wise power, this ** right (may build a lazy POW node).
  MAKELAZYBIN(right,POW)
  return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
}
1910
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Element-wise addition of two Data objects (may build a lazy ADD node).
Data
escript::operator+(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,ADD)
  return C_TensorBinaryOperation(left, right, plus<double>());
}
1919
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Element-wise subtraction of two Data objects (may build a lazy SUB node).
Data
escript::operator-(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,SUB)
  return C_TensorBinaryOperation(left, right, minus<double>());
}
1928
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Element-wise multiplication of two Data objects (may build a lazy MUL node).
Data
escript::operator*(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,MUL)
  return C_TensorBinaryOperation(left, right, multiplies<double>());
}
1937
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Element-wise division of two Data objects (may build a lazy DIV node).
Data
escript::operator/(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,DIV)
  return C_TensorBinaryOperation(left, right, divides<double>());
}
1946
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Data + python value: wrap the value in a Data on left's FunctionSpace.
Data
escript::operator+(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,ADD)
  return left+tmp;
}
1956
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Data - python value: wrap the value in a Data on left's FunctionSpace.
Data
escript::operator-(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,SUB)
  return left-tmp;
}
1966
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Data * python value: wrap the value in a Data on left's FunctionSpace.
Data
escript::operator*(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,MUL)
  return left*tmp;
}
1976
//
// NOTE: It is essential to specify the namespace this operator belongs to
// Data / python value: wrap the value in a Data on left's FunctionSpace.
Data
escript::operator/(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,DIV)
  return left/tmp;
}
1986
//
// NOTE: It is essential to specify the namespace this operator belongs to
// python value + Data: wrap the value in a Data on right's FunctionSpace.
Data
escript::operator+(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,ADD)
  return tmp+right;
}
1996
//
// NOTE: It is essential to specify the namespace this operator belongs to
// python value - Data: wrap the value in a Data on right's FunctionSpace.
Data
escript::operator-(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,SUB)
  return tmp-right;
}
2006
//
// NOTE: It is essential to specify the namespace this operator belongs to
// python value * Data: wrap the value in a Data on right's FunctionSpace.
Data
escript::operator*(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,MUL)
  return tmp*right;
}
2016
//
// NOTE: It is essential to specify the namespace this operator belongs to
// python value / Data: wrap the value in a Data on right's FunctionSpace.
Data
escript::operator/(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,DIV)
  return tmp/right;
}
2026
2027
2028 /* TODO */
2029 /* global reduction */
Data
Data::getItem(const boost::python::object& key) const
{
  // python __getitem__: converts the key to a slice region over the
  // data-point shape and returns the corresponding slice.
  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);

  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }

  return getSlice(slice_region);
}
2042
2043 /* TODO */
2044 /* global reduction */
Data
Data::getSlice(const DataTypes::RegionType& region) const
{
  // Returns a new Data built from the given region (slicing constructor).
  return Data(*this,region);
}
2050
2051 /* TODO */
2052 /* global reduction */
void
Data::setItemO(const boost::python::object& key,
               const boost::python::object& value)
{
  // python __setitem__ with a python value: wrap it in a Data and delegate.
  Data tempData(value,getFunctionSpace());
  setItemD(key,tempData);
}
2060
void
Data::setItemD(const boost::python::object& key,
               const Data& value)
{
  // python __setitem__ with a Data value: validate the slice region, then
  // write the (possibly FunctionSpace-converted) value into it.
  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }
  exclusiveWrite();
  if (getFunctionSpace()!=value.getFunctionSpace()) {
    // interpolate the value onto this object's FunctionSpace first
    setSlice(Data(value,getFunctionSpace()),slice_region);
  } else {
    setSlice(value,slice_region);
  }
}
2076
void
Data::setSlice(const Data& value,
               const DataTypes::RegionType& region)
{
  // Overwrites the given region of this object with value, after resolving
  // lazy data and matching representations in both directions.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  FORCERESOLVE;
  /* if (isLazy())
  {
    throw DataException("Error - setSlice not permitted on lazy data.");
  }*/
  exclusiveWrite(); // In case someone finds a way to call this without going through setItemD
  Data tempValue(value);
  typeMatchLeft(tempValue);
  typeMatchRight(tempValue);
  getReady()->setSlice(tempValue.m_data.get(),region);
}
2095
void
Data::typeMatchLeft(Data& right) const
{
  // Promotes right's representation to at least this object's:
  // expanded dominates tagged, which dominates constant.
  if (right.isLazy() && !isLazy())
  {
    right.resolve();
  }
  if (isExpanded()){
    right.expand();
  } else if (isTagged()) {
    if (right.isConstant()) {
      right.tag();
    }
  }
}
2111
void
Data::typeMatchRight(const Data& right)
{
  // Promotes this object's representation to at least right's:
  // expanded dominates tagged, which dominates constant.
  if (isLazy() && !right.isLazy())
  {
    resolve();
  }
  if (isTagged()) {
    if (right.isExpanded()) {
      expand();
    }
  } else if (isConstant()) {
    if (right.isExpanded()) {
      expand();
    } else if (right.isTagged()) {
      tag();
    }
  }
}
2131
void
Data::setTaggedValueByName(std::string name,
                           const boost::python::object& value)
{
  // Looks up the tag key for a named tag on the domain and sets its value.
  // NOTE(review): an unknown tag name is silently ignored -- confirm intended.
  if (getFunctionSpace().getDomain()->isValidTagName(name)) {
    FORCERESOLVE;
    exclusiveWrite();
    int tagKey=getFunctionSpace().getDomain()->getTag(name);
    setTaggedValue(tagKey,value);
  }
}
2143
void
Data::setTaggedValue(int tagKey,
                     const boost::python::object& value)
{
  // Sets the value associated with tagKey from a python object.
  // Constant data is promoted to tagged first.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // Ensure underlying data object is of type DataTagged
  FORCERESOLVE;
  exclusiveWrite();
  if (isConstant()) tag();
  WrappedArray w(value);

  // copy the python value into a DataVector before handing it down
  DataVector temp_data2;
  temp_data2.copyFromArray(w,1);

  m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
}
2163
2164
void
Data::setTaggedValueFromCPP(int tagKey,
                            const DataTypes::ShapeType& pointshape,
                            const DataTypes::ValueType& value,
                            int dataOffset)
{
  // C++-side variant of setTaggedValue: the value is already a ValueType
  // (read starting at dataOffset) with the given point shape.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // Ensure underlying data object is of type DataTagged
  FORCERESOLVE;
  if (isConstant()) tag();
  exclusiveWrite();
  //
  // Call DataAbstract::setTaggedValue
  m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
}
2183
int
Data::getTagNumber(int dpno)
{
  // Returns the tag associated with data point number dpno
  // (delegates to the FunctionSpace).
  if (isEmpty())
  {
    throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  return getFunctionSpace().getTagFromDataPointNo(dpno);
}
2193
2194
// Stream output: writes the Data's string representation.
ostream& escript::operator<<(ostream& o, const Data& data)
{
  o << data.toString();
  return o;
}
2200
2201 Data
2202 escript::C_GeneralTensorProduct(Data& arg_0,
2203 Data& arg_1,
2204 int axis_offset,
2205 int transpose)
2206 {
2207 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2208 // SM is the product of the last axis_offset entries in arg_0.getShape().
2209
2210 // deal with any lazy data
2211 // if (arg_0.isLazy()) {arg_0.resolve();}
2212 // if (arg_1.isLazy()) {arg_1.resolve();}
2213 if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
2214 {
2215 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2216 return Data(c);
2217 }
2218
2219 // Interpolate if necessary and find an appropriate function space
2220 Data arg_0_Z, arg_1_Z;
2221 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2222 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2223 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2224 arg_1_Z = Data(arg_1);
2225 }
2226 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2227 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2228 arg_0_Z =Data(arg_0);
2229 }
2230 else {
2231 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2232 }
2233 } else {
2234 arg_0_Z = Data(arg_0);
2235 arg_1_Z = Data(arg_1);
2236 }
2237 // Get rank and shape of inputs
2238 int rank0 = arg_0_Z.getDataPointRank();
2239 int rank1 = arg_1_Z.getDataPointRank();
2240 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2241 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2242
2243 // Prepare for the loops of the product and verify compatibility of shapes
2244 int start0=0, start1=0;
2245 if (transpose == 0) {}
2246 else if (transpose == 1) { start0 = axis_offset; }
2247 else if (transpose == 2) { start1 = rank1-axis_offset; }
2248 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2249
2250
2251 // Adjust the shapes for transpose
2252 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2253 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2254 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2255 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
2256
2257 #if 0
2258 // For debugging: show shape after transpose
2259 char tmp[100];
2260 std::string shapeStr;
2261 shapeStr = "(";
2262 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2263 shapeStr += ")";
2264 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2265 shapeStr = "(";
2266 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2267 shapeStr += ")";
2268 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2269 #endif
2270
2271 // Prepare for the loops of the product
2272 int SL=1, SM=1, SR=1;
2273 for (int i=0; i<rank0-axis_offset; i++) {
2274 SL *= tmpShape0[i];
2275 }
2276 for (int i=rank0-axis_offset; i<rank0; i++) {
2277 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2278 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2279 }
2280 SM *= tmpShape0[i];
2281 }
2282 for (int i=axis_offset; i<rank1; i++) {
2283 SR *= tmpShape1[i];
2284 }
2285
2286 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2287 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2288 { // block to limit the scope of out_index
2289 int out_index=0;
2290 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2291 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2292 }
2293
2294 if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
2295 {
2296 ostringstream os;
2297 os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
2298 throw DataException(os.str());
2299 }
2300
2301 // Declare output Data object
2302 Data res;
2303
2304 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2305 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2306 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2307 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2308 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2309 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2310 }
2311 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2312
2313 // Prepare the DataConstant input
2314 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2315 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2316
2317 // Borrow DataTagged input from Data object
2318 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2319 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2320
2321 // Prepare a DataTagged output 2
2322 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2323 res.tag();
2324 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2325 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2326
2327 // Prepare offset into DataConstant
2328 int offset_0 = tmp_0->getPointOffset(0,0);
2329 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2330
2331 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2332 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2333
2334 // Compute an MVP for the default
2335 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2336 // Compute an MVP for each tag
2337 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2338 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2339 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2340 tmp_2->addTag(i->first);
2341
2342 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2343 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2344
2345 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2346 }
2347
2348 }
2349 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2350
2351 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2352 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2353 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2354 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2355 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2356 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2357 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2358 int sampleNo_1,dataPointNo_1;
2359 int numSamples_1 = arg_1_Z.getNumSamples();
2360 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2361 int offset_0 = tmp_0->getPointOffset(0,0);
2362 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2363 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2364 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2365 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2366 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2367 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2368 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2369 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2370 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2371 }
2372 }
2373
2374 }
2375 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2376
2377 // Borrow DataTagged input from Data object
2378 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2379 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2380
2381 // Prepare the DataConstant input
2382 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2383 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2384
2385 // Prepare a DataTagged output 2
2386 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2387 res.tag();
2388 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2389 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2390
2391 // Prepare offset into DataConstant
2392 int offset_1 = tmp_1->getPointOffset(0,0);
2393 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2394 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2395 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2396
2397 // Compute an MVP for the default
2398 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2399 // Compute an MVP for each tag
2400 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2401 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2402 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2403
2404 tmp_2->addTag(i->first);
2405 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2406 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2407 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2408 }
2409
2410 }
2411 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2412
2413 // Borrow DataTagged input from Data object
2414 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2415 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2416
2417 // Borrow DataTagged input from Data object
2418 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2419 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2420
2421 // Prepare a DataTagged output 2
2422 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2423 res.tag(); // DataTagged output
2424 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2425 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2426
2427 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2428 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2429 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2430
2431 // Compute an MVP for the default
2432 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2433 // Merge the tags
2434 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2435 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2436 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2437 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2438 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2439 }
2440 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2441 tmp_2->addTag(i->first);
2442 }
2443 // Compute an MVP for each tag
2444 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2445 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2446 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2447 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2448 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2449
2450 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2451 }
2452
2453 }
2454 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2455
2456 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2457 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2458 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2459 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2460 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2461 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2462 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2463 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2464 int sampleNo_0,dataPointNo_0;
2465 int numSamples_0 = arg_0_Z.getNumSamples();
2466 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2467 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2468 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2469 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2470 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2471 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2472 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2473 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2474 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2475 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2476 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2477 }
2478 }
2479
2480 }
2481 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2482
2483 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2484 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2485 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2486 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2487 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2488 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2489 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2490 int sampleNo_0,dataPointNo_0;
2491 int numSamples_0 = arg_0_Z.getNumSamples();
2492 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2493 int offset_1 = tmp_1->getPointOffset(0,0);
2494 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2495 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2496 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2497 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2498 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2499 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2500 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2501 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2502 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2503 }
2504 }
2505
2506
2507 }
2508 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2509
2510 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2511 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2512 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2513 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2514 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2515 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2516 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2517 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2518 int sampleNo_0,dataPointNo_0;
2519 int numSamples_0 = arg_0_Z.getNumSamples();
2520 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2521 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2522 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2523 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2524 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2525 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2526 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2527 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2528 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2529 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2530 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2531 }
2532 }
2533
2534 }
2535 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2536
2537 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2538 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2539 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2540 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2541 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2542 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2543 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2544 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2545 int sampleNo_0,dataPointNo_0;
2546 int numSamples_0 = arg_0_Z.getNumSamples();
2547 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2548 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2549 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2550 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2551 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2552 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2553 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2554 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2555 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2556 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2557 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2558 }
2559 }
2560
2561 }
2562 else {
2563 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2564 }
2565
2566 return res;
2567 }
2568
2569 DataAbstract*
2570 Data::borrowData() const
2571 {
2572 return m_data.get();
2573 }
2574
2575 // Not all that happy about returning a non-const from a const
2576 DataAbstract_ptr
2577 Data::borrowDataPtr() const
2578 {
2579 return m_data;
2580 }
2581
2582 // Not all that happy about returning a non-const from a const
2583 DataReady_ptr
2584 Data::borrowReadyPtr() const
2585 {
2586 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2587 EsysAssert((dr!=0), "Error - casting to DataReady.");
2588 return dr;
2589 }
2590
2591 std::string
2592 Data::toString() const
2593 {
2594 if (!m_data->isEmpty() &&
2595 !m_data->isLazy() &&
2596 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2597 {
2598 stringstream temp;
2599 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2600 return temp.str();
2601 }
2602 return m_data->toString();
2603 }
2604
2605
2606 // This method is not thread-safe
2607 DataTypes::ValueType::reference
2608 Data::getDataAtOffsetRW(DataTypes::ValueType::size_type i)
2609 {
2610 // FORCERESOLVE;
2611 // exclusiveWrite();
2612
2613 return getReady()->getDataAtOffsetRW(i);
2614 }
2615
2616 // This method is not thread-safe
2617 DataTypes::ValueType::const_reference
2618 Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i)
2619 {
2620 // FORCERESOLVE;
2621 return getReady()->getDataAtOffsetRO(i);
2622 }
2623
2624
2625 // DataTypes::ValueType::const_reference
2626 // Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i) const
2627 // {
2628 // if (isLazy())
2629 // {
2630 // throw DataException("Programmer error - getDataAtOffsetRO() not permitted on Lazy Data (object is const which prevents resolving).");
2631 // }
2632 // return getReady()->getDataAtOffsetRO(i);
2633 // }
2634
2635
2636 DataTypes::ValueType::const_reference
2637 Data::getDataPointRO(int sampleNo, int dataPointNo)
2638 {
2639 FORCERESOLVE;
2640 if (!isReady())
2641 {
2642 throw DataException("Programmer error -getDataPointRO() not permitted on Lazy Data.");
2643 }
2644 else
2645 {
2646 const DataReady* dr=getReady();
2647 return dr->getDataAtOffsetRO(dr->getPointOffset(sampleNo, dataPointNo));
2648 }
2649 }
2650
2651
2652
2653
2654 DataTypes::ValueType::reference
2655 Data::getDataPointRW(int sampleNo, int dataPointNo)
2656 {
2657 FORCERESOLVE;
2658 if (!isReady())
2659 {
2660 throw DataException("Programmer error - getDataPointRW() not permitted on Lazy Data.");
2661 }
2662 else
2663 {
2664 exclusiveWrite();
2665 DataReady* dr=getReady();
2666 return dr->getDataAtOffsetRW(dr->getPointOffset(sampleNo, dataPointNo));
2667 }
2668 }
2669
2670 DataTypes::ValueType*
2671 Data::allocSampleBuffer() const
2672 {
2673 if (isLazy())
2674 {
2675 return new DataTypes::ValueType(getSampleBufferSize());
2676 }
2677 else
2678 {
2679 return NULL;
2680 }
2681 }
2682
2683 void
2684 Data::freeSampleBuffer(DataTypes::ValueType* buffer)
2685 {
2686 if (buffer!=0)
2687 {
2688 delete buffer;
2689 }
2690 }
2691
2692
2693 /* Member functions specific to the MPI implementation */
2694
2695 void
2696 Data::print()
2697 {
2698 int i,j;
2699
2700 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2701 for( i=0; i<getNumSamples(); i++ )
2702 {
2703 printf( "[%6d]", i );
2704 for( j=0; j<getNumDataPointsPerSample(); j++ )
2705 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2706 printf( "\n" );
2707 }
2708 }
2709 void
2710 Data::dump(const std::string fileName) const
2711 {
2712 try
2713 {
2714 if (isLazy())
2715 {
2716 Data temp(*this); // this is to get a non-const object which we can resolve
2717 temp.resolve();
2718 temp.dump(fileName);
2719 }
2720 else
2721 {
2722 return m_data->dump(fileName);
2723 }
2724 }
2725 catch (std::exception& e)
2726 {
2727 cout << e.what() << endl;
2728 }
2729 }
2730
2731 int
2732 Data::get_MPISize() const
2733 {
2734 int size;
2735 #ifdef PASO_MPI
2736 int error;
2737 error = MPI_Comm_size( get_MPIComm(), &size );
2738 #else
2739 size = 1;
2740 #endif
2741 return size;
2742 }
2743
2744 int
2745 Data::get_MPIRank() const
2746 {
2747 int rank;
2748 #ifdef PASO_MPI
2749 int error;
2750 error = MPI_Comm_rank( get_MPIComm(), &rank );
2751 #else
2752 rank = 0;
2753 #endif
2754 return rank;
2755 }
2756
// The communicator this object works with: MPI_COMM_WORLD in MPI builds,
// otherwise -1 (MPI_Comm is presumably an integral typedef in serial
// builds -- TODO confirm against the non-MPI headers).
MPI_Comm
Data::get_MPIComm() const
{
#ifdef PASO_MPI
	return MPI_COMM_WORLD;
#else
	return -1;
#endif
}
2766
2767

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26