/[escript]/branches/arrexp_2137_win_merge/escript/src/Data.cpp

Revision 2226 - Thu Jan 22 05:11:02 2009 UTC, by jfenwick
File size: 76534 bytes
Log message: It continues

/*******************************************************
*
* Copyright (c) 2003-2008 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/

#include "Data.h"

#include "DataExpanded.h"
#include "DataConstant.h"
#include "DataTagged.h"
#include "DataEmpty.h"
#include "DataLazy.h"
#include "FunctionSpaceFactory.h"
#include "AbstractContinuousDomain.h"
#include "UnaryFuncs.h"
#include "FunctionSpaceException.h"
#include "EscriptParams.h"

extern "C" {
#include "esysUtils/blocktimer.h"
}

#include <fstream>
#include <algorithm>
#include <vector>
#include <functional>
#include <sstream>  // so we can throw messages about ranks

#include <boost/python/dict.hpp>
#include <boost/python/extract.hpp>
#include <boost/python/long.hpp>
#include "WrappedArray.h"

using namespace std;
using namespace boost::python;
using namespace boost;
using namespace escript;

// Ensure the current object is not a DataLazy.
// The idea was that we could add an optional warning whenever a resolve is forced.
// #define forceResolve() if (isLazy()) {resolve();}
#define AUTOLAZYON escriptParams.getAUTOLAZY()

#define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
    DataLazy* c=new DataLazy(borrowDataPtr(),X);\
    return Data(c);\
}

#define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
    DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
    return Data(c);\
}

#define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
    DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
    /* m_data=c->getPtr(); */ set_m_data(c->getPtr());\
    return (*this);\
}

// Like the above but returns a new Data rather than *this.
#define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
    DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
    return Data(c);\
}

#define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
{\
    DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
    return Data(c);\
}
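
// Illustrative sketch (comments only, not part of the build): with AUTOLAZY
// enabled and expanded data, a unary method such as Data::sin() returns a
// deferred DataLazy node instead of evaluating. MAKELAZYOP(SIN) expands to:
//
//   if (isLazy() || (escriptParams.getAUTOLAZY() && m_data->isExpanded()))
//   {
//       DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
//       return Data(c);
//   }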

// Do not use the following unless you want to make copies on assignment rather than
// share data. CopyOnWrite should make this unnecessary.
// #define ASSIGNMENT_MEANS_DEEPCOPY

namespace
{

// This should be safer once the DataC RO changes have been brought in.
template <class ARR>
boost::python::tuple
pointToTuple(const DataTypes::ShapeType& shape, ARR v)
{
    using namespace boost::python;
    using boost::python::list;
    int rank=shape.size();
    if (rank==0)
    {
        return make_tuple(v[0]);
    }
    else if (rank==1)
    {
        list l;
        for (size_t i=0;i<shape[0];++i)
        {
            l.append(v[i]);
        }
        return tuple(l);
    }
    else if (rank==2)
    {
        list lj;
        for (size_t j=0;j<shape[0];++j)
        {
            list li;
            for (size_t i=0;i<shape[1];++i)
            {
                li.append(v[DataTypes::getRelIndex(shape,j,i)]);
            }
            lj.append(tuple(li));
        }
        return tuple(lj);
    }
    else if (rank==3)
    {
        list lk;
        for (size_t k=0;k<shape[0];++k)
        {
            list lj;
            for (size_t j=0;j<shape[1];++j)
            {
                list li;
                for (size_t i=0;i<shape[2];++i)
                {
                    li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
                }
                lj.append(tuple(li));
            }
            lk.append(tuple(lj));
        }
        return tuple(lk);
    }
    else if (rank==4)
    {
        list ll;
        for (size_t l=0;l<shape[0];++l)
        {
            list lk;
            for (size_t k=0;k<shape[1];++k)
            {
                list lj;
                for (size_t j=0;j<shape[2];++j)
                {
                    list li;
                    for (size_t i=0;i<shape[3];++i)
                    {
                        li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
                    }
                    lj.append(tuple(li));
                }
                lk.append(tuple(lj));
            }
            ll.append(tuple(lk));
        }
        return tuple(ll);
    }
    else
        throw DataException("Unknown rank in pointToTuple.");
}

} // anonymous namespace
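
// Usage sketch (comments only): for shape (2,2) and row-major values
// {1.,2.,3.,4.}, pointToTuple(shape,v) yields the nested Python tuple
// ((1.0, 2.0), (3.0, 4.0)); rank-0 data collapses to a single value.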

Data::Data()
    : m_shared(false), m_lazy(false)
{
    //
    // Default data is type DataEmpty
    DataAbstract* temp=new DataEmpty();
    // m_data=temp->getPtr();
    set_m_data(temp->getPtr());
    m_protected=false;
}

Data::Data(double value,
           const tuple& shape,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    DataTypes::ShapeType dataPointShape;
    for (int i = 0; i < shape.attr("__len__")(); ++i) {
        dataPointShape.push_back(extract<const int>(shape[i]));
    }

    int len = DataTypes::noValues(dataPointShape);
    DataVector temp_data(len,value,len);
    initialise(temp_data, dataPointShape, what, expanded);
    m_protected=false;
}

Data::Data(double value,
           const DataTypes::ShapeType& dataPointShape,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    int len = DataTypes::noValues(dataPointShape);

    DataVector temp_data(len,value,len);
    // DataArrayView temp_dataView(temp_data, dataPointShape);

    // initialise(temp_dataView, what, expanded);
    initialise(temp_data, dataPointShape, what, expanded);

    m_protected=false;
}

Data::Data(const Data& inData)
    : m_shared(false), m_lazy(false)
{
    // m_data=inData.m_data;
    set_m_data(inData.m_data);
    m_protected=inData.isProtected();
}

Data::Data(const Data& inData,
           const DataTypes::RegionType& region)
    : m_shared(false), m_lazy(false)
{
    DataAbstract_ptr dat=inData.m_data;
    if (inData.isLazy())
    {
        dat=inData.m_data->resolve();
    }
    else
    {
        dat=inData.m_data;
    }
    //
    // Create Data which is a slice of another Data
    DataAbstract* tmp = dat->getSlice(region);
    // m_data=DataAbstract_ptr(tmp);
    set_m_data(DataAbstract_ptr(tmp));
    m_protected=false;
}

Data::Data(const Data& inData,
           const FunctionSpace& functionspace)
    : m_shared(false), m_lazy(false)
{
    if (inData.isEmpty())
    {
        throw DataException("Error - will not interpolate for instances of DataEmpty.");
    }
    if (inData.getFunctionSpace()==functionspace) {
        // m_data=inData.m_data;
        set_m_data(inData.m_data);
    }
    else
    {
        if (inData.isConstant()) {  // for a constant function, we just need to use the new function space
            if (!inData.probeInterpolation(functionspace))
            {  // even though this is constant, we still need to check whether interpolation is allowed
                throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
            }
            // if the data is not lazy, this will just be a cast to DataReady
            DataReady_ptr dr=inData.m_data->resolve();
            DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
            // m_data=DataAbstract_ptr(dc);
            set_m_data(DataAbstract_ptr(dc));
        } else {
            Data tmp(0,inData.getDataPointShape(),functionspace,true);
            // Note: Must use a reference or pointer to a derived object
            // in order to get polymorphic behaviour. Shouldn't really
            // be able to create an instance of AbstractDomain, but that was done
            // as a boost::python workaround which may no longer be required.
            /*const AbstractDomain& inDataDomain=inData.getDomain();*/
            const_Domain_ptr inDataDomain=inData.getDomain();
            if (inDataDomain==functionspace.getDomain()) {
                inDataDomain->interpolateOnDomain(tmp,inData);
            } else {
                inDataDomain->interpolateACross(tmp,inData);
            }
            // m_data=tmp.m_data;
            set_m_data(tmp.m_data);
        }
    }
    m_protected=false;
}

Data::Data(DataAbstract* underlyingdata)
    : m_shared(false), m_lazy(false)
{
    set_m_data(underlyingdata->getPtr());
    m_protected=false;
}

Data::Data(DataAbstract_ptr underlyingdata)
    : m_shared(false), m_lazy(false)
{
    set_m_data(underlyingdata);
    m_protected=false;
}

Data::Data(const DataTypes::ValueType& value,
           const DataTypes::ShapeType& shape,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    initialise(value,shape,what,expanded);
    m_protected=false;
}

Data::Data(const object& value,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    //numeric::array asNumArray(value);
    WrappedArray w(value);
    initialise(w,what,expanded);
    m_protected=false;
}

Data::Data(const object& value,
           const Data& other)
    : m_shared(false), m_lazy(false)
{
    WrappedArray w(value);

    // extract the shape of the numarray
    const DataTypes::ShapeType& tempShape=w.getShape();
    if (w.getRank()==0) {

        // get the space for the data vector
        int len1 = DataTypes::noValues(tempShape);
        DataVector temp_data(len1, 0.0, len1);
        temp_data.copyFromArray(w,1);

        int len = DataTypes::noValues(other.getDataPointShape());

        DataVector temp2_data(len, temp_data[0], len);
        DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
        // m_data=DataAbstract_ptr(t);
        set_m_data(DataAbstract_ptr(t));

    } else {
        //
        // Create a DataConstant with the same sample shape as other
        DataConstant* t=new DataConstant(w,other.getFunctionSpace());
        // m_data=DataAbstract_ptr(t);
        set_m_data(DataAbstract_ptr(t));
    }
    m_protected=false;
}

Data::~Data()
{
    set_m_data(DataAbstract_ptr());
}

// Only call in thread safe contexts.
// This method should be atomic.
void Data::set_m_data(DataAbstract_ptr p)
{
    if (m_data.get()!=0)  // release old ownership
    {
        m_data->removeOwner(this);
    }
    if (p.get()!=0)
    {
        m_data=p;
        m_data->addOwner(this);
        m_shared=m_data->isShared();
        m_lazy=m_data->isLazy();
    }
}
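
// Sharing sketch (comments only, behaviour assumed from addOwner/isShared):
// after Data b(a); both objects reference the same DataAbstract, so m_shared
// becomes true for each; the first mutating call goes through
// exclusiveWrite()/requireWrite(), which deep-copies before writing.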

void Data::initialise(const WrappedArray& value,
                      const FunctionSpace& what,
                      bool expanded)
{
    //
    // Construct a Data object of the appropriate type.
    // Construct the object first as there seems to be a bug which causes
    // undefined behaviour if an exception is thrown during construction
    // within the shared_ptr constructor.
    if (expanded) {
        DataAbstract* temp=new DataExpanded(value, what);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else {
        DataAbstract* temp=new DataConstant(value, what);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    }
}

void
Data::initialise(const DataTypes::ValueType& value,
                 const DataTypes::ShapeType& shape,
                 const FunctionSpace& what,
                 bool expanded)
{
    //
    // Construct a Data object of the appropriate type.
    // Construct the object first as there seems to be a bug which causes
    // undefined behaviour if an exception is thrown during construction
    // within the shared_ptr constructor.
    if (expanded) {
        DataAbstract* temp=new DataExpanded(what, shape, value);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else {
        DataAbstract* temp=new DataConstant(what, shape, value);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    }
}

escriptDataC
Data::getDataC()
{
    escriptDataC temp;
    temp.m_dataPtr=(void*)this;
    return temp;
}

escriptDataC
Data::getDataC() const
{
    escriptDataC temp;
    temp.m_dataPtr=(void*)this;
    return temp;
}

size_t
Data::getSampleBufferSize() const
{
    return m_data->getSampleBufferSize();
}

const boost::python::tuple
Data::getShapeTuple() const
{
    const DataTypes::ShapeType& shape=getDataPointShape();
    switch(getDataPointRank()) {
        case 0:
            return make_tuple();
        case 1:
            return make_tuple(long_(shape[0]));
        case 2:
            return make_tuple(long_(shape[0]),long_(shape[1]));
        case 3:
            return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
        case 4:
            return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
        default:
            throw DataException("Error - illegal Data rank.");
    }
}

// The different name is needed because boost has trouble with overloaded functions.
// It can't work out what type the function is based solely on its name.
// There are ways to fix this involving creating function pointer variables for each form,
// but there doesn't seem to be a need, given that the methods have the same name from the python point of view.
Data
Data::copySelf()
{
    DataAbstract* temp=m_data->deepCopy();
    return Data(temp);
}

void
Data::copy(const Data& other)
{
    DataAbstract* temp=other.m_data->deepCopy();
    DataAbstract_ptr p=temp->getPtr();
    // m_data=p;
    set_m_data(p);
}

Data
Data::delay()
{
    DataLazy* dl=new DataLazy(m_data);
    return Data(dl);
}

void
Data::delaySelf()
{
    if (!isLazy())
    {
        // m_data=(new DataLazy(m_data))->getPtr();
        set_m_data((new DataLazy(m_data))->getPtr());
    }
}
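
// Usage sketch (comments only): d.delay() wraps the current data in a DataLazy
// node without evaluating it; the deferred expression is evaluated later by
// resolve() (or implicitly wherever a concrete value is required).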

// For lazy data, it would seem that DataTagged will need to be treated differently since, even after setting all tags
// to zero, all the tags from all the DataTagged objects would be in the result.
// However, since they all have the same value (0), whether they are there or not should not matter.
// So I have decided that for all types this method will create a constant 0.
// It can be promoted up as required.
// A possible efficiency concern might be expanded->constant->expanded, which incurs extra memory management,
// but we can deal with that if it arises.
//
void
Data::setToZero()
{
    if (isEmpty())
    {
        throw DataException("Error - Operations not permitted on instances of DataEmpty.");
    }
    if (isLazy())
    {
        DataTypes::ValueType v(getNoValues(),0);
        DataConstant* dc=new DataConstant(getFunctionSpace(),getDataPointShape(),v);
        DataLazy* dl=new DataLazy(dc->getPtr());
        set_m_data(dl->getPtr());
    }
    else
    {
        exclusiveWrite();
        m_data->setToZero();
    }
}

void
Data::copyWithMask(const Data& other,
                   const Data& mask)
{
    // 1. Interpolate if required so all Datas use the same FS as this
    // 2. Tag or Expand so that all Datas are the same type
    // 3. Iterate over the data vectors copying values where mask is >0
    if (other.isEmpty() || mask.isEmpty())
    {
        throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
    }
    Data other2(other);
    Data mask2(mask);
    other2.resolve();
    mask2.resolve();
    this->resolve();
    FunctionSpace myFS=getFunctionSpace();
    FunctionSpace oFS=other2.getFunctionSpace();
    FunctionSpace mFS=mask2.getFunctionSpace();
    if (oFS!=myFS)
    {
        if (other2.probeInterpolation(myFS))
        {
            other2=other2.interpolate(myFS);
        }
        else
        {
            throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
        }
    }
    if (mFS!=myFS)
    {
        if (mask2.probeInterpolation(myFS))
        {
            mask2=mask2.interpolate(myFS);
        }
        else
        {
            throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
        }
    }
    // Ensure that all args have the same type
    if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
    {
        this->expand();
        other2.expand();
        mask2.expand();
    }
    else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
    {
        this->tag();
        other2.tag();
        mask2.tag();
    }
    else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
    {
    }
    else
    {
        throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
    }
    exclusiveWrite();
    // Now we iterate over the elements
    DataVector& self=getReady()->getVectorRW();
    const DataVector& ovec=other2.getReady()->getVectorRO();
    const DataVector& mvec=mask2.getReady()->getVectorRO();
    if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
    {
        throw DataException("Error - size mismatch in arguments to copyWithMask.");
    }
    size_t num_points=self.size();

    // OpenMP 3.0 allows unsigned loop vars.
#if defined(_OPENMP) && (_OPENMP < 200805)
    long i;
#else
    size_t i;
#endif
    #pragma omp parallel for private(i) schedule(static)
    for (i=0;i<num_points;++i)
    {
        if (mvec[i]>0)
        {
            self[i]=ovec[i];
        }
    }
}
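
// Usage sketch (comments only): given Data u, v and a mask m over the same
// domain,
//   u.copyWithMask(v, m);
// copies v's values into u wherever the corresponding mask entry is > 0,
// interpolating and promoting the operands first as described above.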

bool
Data::isExpanded() const
{
    DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
    return (temp!=0);
}

bool
Data::actsExpanded() const
{
    return m_data->actsExpanded();
}

bool
Data::isTagged() const
{
    DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
    return (temp!=0);
}

bool
Data::isEmpty() const
{
    DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
    return (temp!=0);
}

bool
Data::isConstant() const
{
    DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
    return (temp!=0);
}

bool
Data::isLazy() const
{
    return m_lazy;  // not asking m_data because we need to be able to ask this while m_data is changing
}

// At the moment this is synonymous with !isLazy(), but that could change.
bool
Data::isReady() const
{
    return (dynamic_cast<DataReady*>(m_data.get())!=0);
}

void
Data::setProtection()
{
    m_protected=true;
}

bool
Data::isProtected() const
{
    return m_protected;
}

void
Data::expand()
{
    if (isConstant()) {
        DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
        DataAbstract* temp=new DataExpanded(*tempDataConst);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else if (isTagged()) {
        DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
        DataAbstract* temp=new DataExpanded(*tempDataTag);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else if (isExpanded()) {
        //
        // do nothing
    } else if (isEmpty()) {
        throw DataException("Error - Expansion of DataEmpty not possible.");
    } else if (isLazy()) {
        resolve();
        expand();  // resolve might not give us expanded data
    } else {
        throw DataException("Error - Expansion not implemented for this Data type.");
    }
}

void
Data::tag()
{
    if (isConstant()) {
        DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
        DataAbstract* temp=new DataTagged(*tempDataConst);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else if (isTagged()) {
        // do nothing
    } else if (isExpanded()) {
        throw DataException("Error - Creating tag data from DataExpanded not possible.");
    } else if (isEmpty()) {
        throw DataException("Error - Creating tag data from DataEmpty not possible.");
    } else if (isLazy()) {
        DataAbstract_ptr res=m_data->resolve();
        if (res->isExpanded())
        {
            throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
        }
        // m_data=res;
        set_m_data(res);
        tag();
    } else {
        throw DataException("Error - Tagging not implemented for this Data type.");
    }
}

void
Data::resolve()
{
    if (isLazy())
    {
        // m_data=m_data->resolve();
        set_m_data(m_data->resolve());
    }
}

void
Data::requireWrite()
{
    // #ifdef _OPENMP
    // if (omp_in_parallel())  // Yes, this throws an exception inside a parallel region, which is forbidden.
    // {                       // However, programs which do this are unsafe and need to be fixed.
    //     throw DataException("Programming error. Please do not run requireWrite() in multi-threaded sections.");
    // }
    // #endif
    resolve();
    exclusiveWrite();
}

Data
Data::oneOver() const
{
    MAKELAZYOP(RECIP)
    return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
}

Data
Data::wherePositive() const
{
    MAKELAZYOP(GZ)
    return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
}

Data
Data::whereNegative() const
{
    MAKELAZYOP(LZ)
    return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
}

Data
Data::whereNonNegative() const
{
    MAKELAZYOP(GEZ)
    return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
}

Data
Data::whereNonPositive() const
{
    MAKELAZYOP(LEZ)
    return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
}

Data
Data::whereZero(double tol) const
{
    // Data dataAbs=abs();
    // return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
    MAKELAZYOPOFF(EZ,tol)
    return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
}

Data
Data::whereNonZero(double tol) const
{
    // Data dataAbs=abs();
    // return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
    MAKELAZYOPOFF(NEZ,tol)
    return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
}
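
// Semantics sketch (comments only): each where* call maps a component x to
// 1.0 or 0.0, e.g. wherePositive() tests x > 0 and whereZero(tol) tests
// |x| <= tol, so the results can be used directly as masks in copyWithMask.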

Data
Data::interpolate(const FunctionSpace& functionspace) const
{
    return Data(*this,functionspace);
}

bool
Data::probeInterpolation(const FunctionSpace& functionspace) const
{
    return getFunctionSpace().probeInterpolation(functionspace);
}

Data
Data::gradOn(const FunctionSpace& functionspace) const
{
    if (isEmpty())
    {
        throw DataException("Error - operation not permitted on instances of DataEmpty.");
    }
    double blocktimer_start = blocktimer_time();
    if (functionspace.getDomain()!=getDomain())
        throw DataException("Error - gradient cannot be calculated on different domains.");
    DataTypes::ShapeType grad_shape=getDataPointShape();
    grad_shape.push_back(functionspace.getDim());
    Data out(0.0,grad_shape,functionspace,true);
    getDomain()->setToGradient(out,*this);
    blocktimer_increment("grad()", blocktimer_start);
    return out;
}

Data
Data::grad() const
{
    if (isEmpty())
    {
        throw DataException("Error - operation not permitted on instances of DataEmpty.");
    }
    return gradOn(escript::function(*getDomain()));
}

int
Data::getDataPointSize() const
{
    return m_data->getNoValues();
}

DataTypes::ValueType::size_type
Data::getLength() const
{
    return m_data->getLength();
}

const boost::python::numeric::array
Data::getValueOfDataPoint(int dataPointNo)
{
    return boost::python::numeric::array(getValueOfDataPointAsTuple(dataPointNo));
}

const boost::python::object
Data::getValueOfDataPointAsTuple(int dataPointNo)
{
    forceResolve();
    if (getNumDataPointsPerSample()>0) {
        int sampleNo = dataPointNo/getNumDataPointsPerSample();
        int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
        //
        // Check a valid sample number has been supplied
        if ((sampleNo >= getNumSamples()) || (sampleNo < 0)) {
            throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
        }

        //
        // Check a valid data point number has been supplied
        if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
            throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
        }
        // TODO: global error handling
        DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
        return pointToTuple(getDataPointShape(),&(getDataAtOffsetRO(offset)));
    }
    else
    {
        // The old method would return an empty numArray of the given shape.
        // Throw an exception instead, because zero points per sample indicates a problem.
        throw DataException("Error - need at least 1 datapoint per sample.");
    }
}
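
// Index sketch (comments only): with 4 data points per sample, global point
// number 10 maps to sampleNo = 10/4 = 2 and dataPointNoInSample = 10 - 2*4 = 2.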

void
Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
{
    // this will throw if the value cannot be represented
    setValueOfDataPointToArray(dataPointNo,py_object);
}

void
Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    forceResolve();

    WrappedArray w(obj);
    //
    // check rank
    if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
        throw DataException("Rank of numarray does not match Data object rank");

    //
    // check shape of numarray
    for (unsigned int i=0; i<getDataPointRank(); i++) {
        if (w.getShape()[i]!=getDataPointShape()[i])
            throw DataException("Shape of numarray does not match Data object shape");
    }
    //
    // make sure data is expanded:
    //
    if (!isExpanded()) {
        expand();
    }
    if (getNumDataPointsPerSample()>0) {
        int sampleNo = dataPointNo/getNumDataPointsPerSample();
        int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
        m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
    } else {
        m_data->copyToDataPoint(-1, 0,w);
    }
}

void
Data::setValueOfDataPoint(int dataPointNo, const double value)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    //
    // make sure data is expanded:
    forceResolve();
    if (!isExpanded()) {
        expand();
    }
    if (getNumDataPointsPerSample()>0) {
        int sampleNo = dataPointNo/getNumDataPointsPerSample();
        int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
        m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
    } else {
        m_data->copyToDataPoint(-1, 0,value);
    }
}

const boost::python::numeric::array
Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
{
    return boost::python::numeric::array(getValueOfGlobalDataPointAsTuple(procNo, dataPointNo));
}

const boost::python::object
Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
{
    // This could be lazier than it is now
    forceResolve();

    // copy datapoint into a buffer
    // broadcast buffer to all nodes
    // convert buffer to tuple
    // return tuple

    const DataTypes::ShapeType& dataPointShape = getDataPointShape();
    size_t length=DataTypes::noValues(dataPointShape);

    // added for the MPI communication
    double *tmpData = new double[length];

    // updated for the MPI case
    if( get_MPIRank()==procNo ){
        if (getNumDataPointsPerSample()>0) {
            int sampleNo = dataPointNo/getNumDataPointsPerSample();
            int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
            //
            // Check a valid sample number has been supplied
            if ((sampleNo >= getNumSamples()) || (sampleNo < 0)) {
                throw DataException("Error - Data::getValueOfGlobalDataPointAsTuple: invalid sampleNo.");
            }

            //
            // Check a valid data point number has been supplied
            if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
                throw DataException("Error - Data::getValueOfGlobalDataPointAsTuple: invalid dataPointNoInSample.");
            }
            // TODO: global error handling
            DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);

            memcpy(tmpData,&(getDataAtOffsetRO(offset)),length*sizeof(double));
        }
    }
#ifdef PASO_MPI
    // broadcast the data to all other processes
    MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
#endif

    boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
    delete [] tmpData;
    //
    // return the loaded array
    return t;
}

boost::python::object
Data::integrateToTuple_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot integrate lazy Data from a const method.");
    }
    return integrateWorker();
}

boost::python::object
Data::integrateToTuple()
{
    if (isLazy())
    {
        expand();
    }
    return integrateWorker();
}

boost::python::object
Data::integrate_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot integrate lazy Data from a const method.");
    }
    return boost::python::numeric::array(integrateWorker());
}

boost::python::object
Data::integrate()
{
    if (isLazy())
    {
        expand();
    }
    return boost::python::numeric::array(integrateWorker());
}

boost::python::object
Data::integrateWorker() const
{
    DataTypes::ShapeType shape = getDataPointShape();
    int dataPointSize = getDataPointSize();

    //
    // calculate the integral values
    vector<double> integrals(dataPointSize);
    vector<double> integrals_local(dataPointSize);
    const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
    if (dom==0)
    {
        throw DataException("Cannot integrate over non-continuous domains.");
    }
#ifdef PASO_MPI
    dom->setToIntegrals(integrals_local,*this);
    // Global sum: use an array instead of a vector because elements of an array are guaranteed to be contiguous in memory
    double *tmp = new double[dataPointSize];
    double *tmp_local = new double[dataPointSize];
    for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
    MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
    for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
    tuple result=pointToTuple(shape,tmp);
    delete[] tmp;
    delete[] tmp_local;
#else
    dom->setToIntegrals(integrals,*this);
    /* double *tmp = new double[dataPointSize];
       for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; } */
    tuple result=pointToTuple(shape,integrals);
    // delete tmp;
#endif

    return result;
}
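
// MPI sketch (comments only): under PASO_MPI each rank computes its local
// integrals, then MPI_Allreduce with MPI_SUM leaves the global sum of every
// component on all ranks before the result is converted to a Python tuple.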

Data
Data::sin() const
{
    MAKELAZYOP(SIN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

Data
Data::cos() const
{
    MAKELAZYOP(COS)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

Data
Data::tan() const
{
    MAKELAZYOP(TAN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

Data
Data::asin() const
{
    MAKELAZYOP(ASIN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

Data
Data::acos() const
{
    MAKELAZYOP(ACOS)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}

Data
Data::atan() const
{
    MAKELAZYOP(ATAN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

Data
Data::sinh() const
{
    MAKELAZYOP(SINH)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

Data
Data::cosh() const
{
    MAKELAZYOP(COSH)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

Data
Data::tanh() const
{
    MAKELAZYOP(TANH)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}

Data
Data::erf() const
{
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    throw DataException("Error - Data::erf function is not supported on _WIN32 platforms.");
#else
    MAKELAZYOP(ERF)
    return C_TensorUnaryOperation(*this, ::erf);
#endif
}

Data
Data::asinh() const
{
    MAKELAZYOP(ASINH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
    return C_TensorUnaryOperation(*this, ::asinh);
#endif
}

Data
Data::acosh() const
{
    MAKELAZYOP(ACOSH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
    return C_TensorUnaryOperation(*this, ::acosh);
#endif
}

Data
Data::atanh() const
{
    MAKELAZYOP(ATANH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
    return C_TensorUnaryOperation(*this, ::atanh);
#endif
}

Data
Data::log10() const
{
    MAKELAZYOP(LOG10)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}

Data
Data::log() const
{
    MAKELAZYOP(LOG)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}

Data
Data::sign() const
{
    MAKELAZYOP(SIGN)
    return C_TensorUnaryOperation(*this, escript::fsign);
}

Data
Data::abs() const
{
    MAKELAZYOP(ABS)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}

Data
Data::neg() const
{
    MAKELAZYOP(NEG)
    return C_TensorUnaryOperation(*this, negate<double>());
}

Data
Data::pos() const
{
    // Not doing the lazy check here is deliberate.
    // Since a deep copy of lazy data should be cheap, I'll just let it happen now.
    Data result;
    // perform a deep copy
    result.copy(*this);
    return result;
}

Data
Data::exp() const
{
    MAKELAZYOP(EXP)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}

Data
Data::sqrt() const
{
    MAKELAZYOP(SQRT)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}

double
Data::Lsup_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot compute Lsup for lazy Data from a const method.");
    }
    return LsupWorker();
}

double
Data::Lsup()
{
    if (isLazy())
    {
        resolve();
    }
    return LsupWorker();
}

double
Data::sup_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot compute sup for lazy Data from a const method.");
    }
    return supWorker();
}

double
Data::sup()
{
    if (isLazy())
    {
        resolve();
    }
    return supWorker();
}

double
Data::inf_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot compute inf for lazy Data from a const method.");
    }
    return infWorker();
}

double
Data::inf()
{
    if (isLazy())
    {
        resolve();
    }
    return infWorker();
}

double
Data::LsupWorker() const
{
    double localValue;
    //
    // set the initial absolute maximum value to zero
    AbsMax abs_max_func;
    localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
    double globalValue;
    MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
    return globalValue;
#else
    return localValue;
#endif
}

double
Data::supWorker() const
{
    double localValue;
    //
    // set the initial maximum value to min possible double
    FMax fmax_func;
    localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
    double globalValue;
    MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
    return globalValue;
#else
    return localValue;
#endif
}

double
Data::infWorker() const
{
    double localValue;
    //
    // set the initial minimum value to max possible double
    FMin fmin_func;
    localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
    double globalValue;
    MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
    return globalValue;
#else
    return localValue;
#endif
}
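
// Reduction sketch (comments only): Lsup is the global maximum of |x|
// (local algorithm(AbsMax,0), then MPI_MAX across ranks); sup and inf use
// FMax/FMin seeded with -DBL_MAX and DBL_MAX respectively.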

/* TODO */
/* global reduction */
Data
Data::maxval() const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.maxval();
    }
    //
    // set the initial maximum value to min possible double
    FMax fmax_func;
    return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
}

Data
Data::minval() const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.minval();
    }
    //
    // set the initial minimum value to max possible double
    FMin fmin_func;
    return dp_algorithm(fmin_func,numeric_limits<double>::max());
}

Data
Data::swapaxes(const int axis0, const int axis1) const
{
    if (isLazy())
    {
        Data temp(*this);
        temp.resolve();
        return temp.swapaxes(axis0,axis1);
    }
    int axis0_tmp,axis1_tmp;
    DataTypes::ShapeType s=getDataPointShape();
    DataTypes::ShapeType ev_shape;
    int rank=getDataPointRank();
    if (rank<2) {
        throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
    }
    if (axis0<0 || axis0>rank-1) {
        ostringstream e;
        e << "Error - Data::swapaxes: axis0 must be between 0 and rank-1=" << (rank-1);
        throw DataException(e.str());
    }
    if (axis1<0 || axis1>rank-1) {
        ostringstream e;
        e << "Error - Data::swapaxes: axis1 must be between 0 and rank-1=" << (rank-1);
        throw DataException(e.str());
    }
    if (axis0 == axis1) {
        throw DataException("Error - Data::swapaxes: axis indices must be different.");
    }
    if (axis0 > axis1) {
        axis0_tmp=axis1;
        axis1_tmp=axis0;
    } else {
        axis0_tmp=axis0;
        axis1_tmp=axis1;
    }
    // build the output shape with the two axes exchanged
    for (int i=0; i<rank; i++) {
        if (i == axis0_tmp) {
            ev_shape.push_back(s[axis1_tmp]);
        } else if (i == axis1_tmp) {
            ev_shape.push_back(s[axis0_tmp]);
        } else {
            ev_shape.push_back(s[i]);
        }
    }
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
    return ev;
}
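
// Usage sketch (comments only): for a rank-3 Data with point shape (2,3,4),
// swapaxes(0,2) returns a Data whose point shape is (4,3,2).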

Data
Data::symmetric() const
{
    // check input
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()==2) {
        if(s[0] != s[1])
            throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
    }
    else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
            throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
    }
    else {
        throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
    }
    MAKELAZYOP(SYM)
    Data ev(0.,getDataPointShape(),getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->symmetric(ev.m_data.get());
    return ev;
}

Data
Data::nonsymmetric() const
{
    MAKELAZYOP(NSYM)
    // check input
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()==2) {
        if(s[0] != s[1])
            throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
    }
    else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
            throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        ev_shape.push_back(s[2]);
        ev_shape.push_back(s[3]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
    }
    else {
        throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
    }
}

Data
Data::trace(int axis_offset) const
{
    MAKELAZYOPOFF(TRACE,axis_offset)
    if ((axis_offset<0) || (axis_offset>static_cast<int>(getDataPointRank())-2))
    {
        throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
    }
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()==2) {
        DataTypes::ShapeType ev_shape;
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
    }
    if (getDataPointRank()==3) {
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
            int s2=s[2];
            ev_shape.push_back(s2);
        }
        else if (axis_offset==1) {
            int s0=s[0];
            ev_shape.push_back(s0);
        }
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
    }
    if (getDataPointRank()==4) {
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
            ev_shape.push_back(s[2]);
            ev_shape.push_back(s[3]);
        }
        else if (axis_offset==1) {
            ev_shape.push_back(s[0]);
            ev_shape.push_back(s[3]);
        }
        else if (axis_offset==2) {
            ev_shape.push_back(s[0]);
            ev_shape.push_back(s[1]);
        }
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
    }
    else {
        throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
    }
}
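
// Usage sketch (comments only, assuming the pair of axes starting at
// axis_offset is contracted): for point shape (3,3,5), trace(0) sums over
// the two leading axes and returns a Data with point shape (5).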

Data
Data::transpose(int axis_offset) const
{
    MAKELAZYOPOFF(TRANS,axis_offset)
    DataTypes::ShapeType s=getDataPointShape();
    DataTypes::ShapeType ev_shape;
    // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset],
    // which goes through all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
    int rank=getDataPointRank();
    if (axis_offset<0 || axis_offset>rank) {
        ostringstream e;
        e << "Error - Data::transpose must have 0 <= axis_offset <= rank=" << rank;
        throw DataException(e.str());
    }
    for (int i=0; i<rank; i++) {
        int index = (axis_offset+i)%rank;
        ev_shape.push_back(s[index]);  // append to new shape
    }
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->transpose(ev.m_data.get(), axis_offset);
    return ev;
}
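
// Usage sketch (comments only): for point shape (2,3,4), transpose(1) yields
// point shape (3,4,2), i.e. the python rotation s[1:]+s[:1].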

Data
Data::eigenvalues() const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.eigenvalues();
    }
    // check input
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
    if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
    // create return
    DataTypes::ShapeType ev_shape(1,s[0]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->eigenvalues(ev.m_data.get());
    return ev;
}

const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.eigenvalues_and_eigenvectors(tol);
    }
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues_and_eigenvectors can only be calculated for rank 2 object.");
    if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues_and_eigenvectors can only be calculated for object with equal first and second dimension.");
    // create return
    DataTypes::ShapeType ev_shape(1,s[0]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    DataTypes::ShapeType V_shape(2,s[0]);
    Data V(0.,V_shape,getFunctionSpace());
    V.typeMatchRight(*this);
    m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
    return make_tuple(boost::python::object(ev),boost::python::object(V));
}

const boost::python::tuple
Data::minGlobalDataPoint() const
{
    // NB: calc_minGlobalDataPoint had to be split off from minGlobalDataPoint, as boost::make_tuple
    // causes an abort (for unknown reasons) if there are openmp directives in the surrounding function.

    int DataPointNo;
    int ProcNo;
    calc_minGlobalDataPoint(ProcNo,DataPointNo);
    return make_tuple(ProcNo,DataPointNo);
}

void
Data::calc_minGlobalDataPoint(int& ProcNo,
                              int& DataPointNo) const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
    }
    int i,j;
    int lowi=0,lowj=0;
    double min=numeric_limits<double>::max();

    Data temp=minval();

    int numSamples=temp.getNumSamples();
    int numDPPSample=temp.getNumDataPointsPerSample();

    double next,local_min;
    int local_lowi=0,local_lowj=0;

    #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
    {
        local_min=min;
        #pragma omp for private(i,j) schedule(static)
        for (i=0; i<numSamples; i++) {
            for (j=0; j<numDPPSample; j++) {
                next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
                if (next<local_min) {
                    local_min=next;
                    local_lowi=i;
                    local_lowj=j;
                }
            }
        }
        #pragma omp critical
        if (local_min<min) {
            min=local_min;
            lowi=local_lowi;
            lowj=local_lowj;
        }
    }

#ifdef PASO_MPI
    // determine the processor on which the minimum occurs
    next = temp.getDataPointRO(lowi,lowj);
    int lowProc = 0;
    double *globalMins = new double[get_MPISize()+1];
    int error;
    error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );

    if( get_MPIRank()==0 ){
        next = globalMins[lowProc];
        for( i=1; i<get_MPISize(); i++ )
            if( next>globalMins[i] ){
                lowProc = i;
                next = globalMins[i];
            }
    }
    MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );

    delete [] globalMins;
    ProcNo = lowProc;
#else
    ProcNo = 0;
#endif
    DataPointNo = lowj + lowi * numDPPSample;
}

void
Data::saveDX(std::string fileName) const
{
    if (isEmpty())
    {
        throw DataException("Error - Operations not permitted on instances of DataEmpty.");
    }
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        temp.saveDX(fileName);
        return;
    }
    boost::python::dict args;
    args["data"]=boost::python::object(this);
    getDomain()->saveDX(fileName,args);
    return;
}

void
Data::saveVTK(std::string fileName) const
{
    if (isEmpty())
    {
        throw DataException("Error - Operations not permitted on instances of DataEmpty.");
    }
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        temp.saveVTK(fileName);
        return;
    }
    boost::python::dict args;
    args["data"]=boost::python::object(this);
    getDomain()->saveVTK(fileName,args);
    return;
}

Data&
Data::operator+=(const Data& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    MAKELAZYBINSELF(right,ADD)  // for lazy data, + is equivalent to +=
    exclusiveWrite();  // since lazy data does not modify its leaves, we only need to worry here
    binaryOp(right,plus<double>());
    return (*this);
}

Data&
Data::operator+=(const boost::python::object& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    Data tmp(right,getFunctionSpace(),false);
    (*this)+=tmp;
    return *this;
}

// Hmmm, operator= makes a deep copy but the copy constructor does not?
Data&
Data::operator=(const Data& other)
{
#if defined ASSIGNMENT_MEANS_DEEPCOPY
    // This should not be used.
    // Just leaving this here until I have completed the transition.
    copy(other);
#else
    m_protected=false;  // since any changes should be caught by exclusiveWrite()
    // m_data=other.m_data;
    set_m_data(other.m_data);
#endif
    return (*this);
}

Data&
Data::operator-=(const Data& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    MAKELAZYBINSELF(right,SUB)
    exclusiveWrite();
    binaryOp(right,minus<double>());
    return (*this);
}

Data&
Data::operator-=(const boost::python::object& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    Data tmp(right,getFunctionSpace(),false);
    (*this)-=tmp;
    return (*this);
}

Data&
Data::operator*=(const Data& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    MAKELAZYBINSELF(right,MUL)
    exclusiveWrite();
    binaryOp(right,multiplies<double>());
    return (*this);
}

Data&
Data::operator*=(const boost::python::object& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    Data tmp(right,getFunctionSpace(),false);
    (*this)*=tmp;
    return (*this);
}

Data&
Data::operator/=(const Data& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    MAKELAZYBINSELF(right,DIV)
    exclusiveWrite();
    binaryOp(right,divides<double>());
    return (*this);
}

Data&
Data::operator/=(const boost::python::object& right)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    Data tmp(right,getFunctionSpace(),false);
    (*this)/=tmp;
    return (*this);
}

Data
Data::rpowO(const boost::python::object& left) const
{
    Data left_d(left,*this);
    return left_d.powD(*this);
}

Data
Data::powO(const boost::python::object& right) const
{
    Data tmp(right,getFunctionSpace(),false);
    return powD(tmp);
}

Data
Data::powD(const Data& right) const
{
    MAKELAZYBIN(right,POW)
    return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const Data& left, const Data& right)
{
    MAKELAZYBIN2(left,right,ADD)
    return C_TensorBinaryOperation(left, right, plus<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const Data& left, const Data& right)
{
    MAKELAZYBIN2(left,right,SUB)
    return C_TensorBinaryOperation(left, right, minus<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const Data& left, const Data& right)
{
    MAKELAZYBIN2(left,right,MUL)
    return C_TensorBinaryOperation(left, right, multiplies<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const Data& left, const Data& right)
{
    MAKELAZYBIN2(left,right,DIV)
    return C_TensorBinaryOperation(left, right, divides<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const Data& left, const boost::python::object& right)
{
    Data tmp(right,left.getFunctionSpace(),false);
    MAKELAZYBIN2(left,tmp,ADD)
    return left+tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const Data& left, const boost::python::object& right)
{
    Data tmp(right,left.getFunctionSpace(),false);
    MAKELAZYBIN2(left,tmp,SUB)
    return left-tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const Data& left, const boost::python::object& right)
{
    Data tmp(right,left.getFunctionSpace(),false);
    MAKELAZYBIN2(left,tmp,MUL)
    return left*tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const Data& left, const boost::python::object& right)
{
    Data tmp(right,left.getFunctionSpace(),false);
    MAKELAZYBIN2(left,tmp,DIV)
    return left/tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const boost::python::object& left, const Data& right)
{
    Data tmp(left,right.getFunctionSpace(),false);
    MAKELAZYBIN2(tmp,right,ADD)
    return tmp+right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const boost::python::object& left, const Data& right)
{
    Data tmp(left,right.getFunctionSpace(),false);
    MAKELAZYBIN2(tmp,right,SUB)
    return tmp-right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const boost::python::object& left, const Data& right)
{
    Data tmp(left,right.getFunctionSpace(),false);
    MAKELAZYBIN2(tmp,right,MUL)
    return tmp*right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const boost::python::object& left, const Data& right)
{
    Data tmp(left,right.getFunctionSpace(),false);
    MAKELAZYBIN2(tmp,right,DIV)
    return tmp/right;
}
2041
2042
2043 /* TODO */
2044 /* global reduction */
2045 Data
2046 Data::getItem(const boost::python::object& key) const
2047 {
2048
2049 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2050
2051 if (slice_region.size()!=getDataPointRank()) {
2052 throw DataException("Error - slice size does not match Data rank.");
2053 }
2054
2055 return getSlice(slice_region);
2056 }
2057
2058 /* TODO */
2059 /* global reduction */
2060 Data
2061 Data::getSlice(const DataTypes::RegionType& region) const
2062 {
2063 return Data(*this,region);
2064 }
2065
2066 /* TODO */
2067 /* global reduction */
2068 void
2069 Data::setItemO(const boost::python::object& key,
2070 const boost::python::object& value)
2071 {
2072 Data tempData(value,getFunctionSpace());
2073 setItemD(key,tempData);
2074 }
2075
2076 void
2077 Data::setItemD(const boost::python::object& key,
2078 const Data& value)
2079 {
2080 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2081 if (slice_region.size()!=getDataPointRank()) {
2082 throw DataException("Error - slice size does not match Data rank.");
2083 }
2084 exclusiveWrite();
2085 if (getFunctionSpace()!=value.getFunctionSpace()) {
2086 setSlice(Data(value,getFunctionSpace()),slice_region);
2087 } else {
2088 setSlice(value,slice_region);
2089 }
2090 }
2091
2092 void
2093 Data::setSlice(const Data& value,
2094 const DataTypes::RegionType& region)
2095 {
2096 if (isProtected()) {
2097 throw DataException("Error - attempt to update protected Data object.");
2098 }
2099 forceResolve();
2100 /* if (isLazy())
2101 {
2102 throw DataException("Error - setSlice not permitted on lazy data.");
2103 }*/
2104 exclusiveWrite(); // In case someone finds a way to call this without going through setItemD
2105 Data tempValue(value);
2106 typeMatchLeft(tempValue);
2107 typeMatchRight(tempValue);
2108 getReady()->setSlice(tempValue.m_data.get(),region);
2109 }
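
// Editorial sketch: the write path mirrors the read path above. setItemO
// wraps the Python value in a Data, setItemD validates the region and takes
// exclusive ownership (copy-on-write), and setSlice resolves/promotes both
// sides before delegating to the ready implementation:
//
//   d[0:2, 1] = v   # Python  ->  setItemO(key, v)
//                   #  -> setItemD(key, Data(v, d.getFunctionSpace()))
//                   #  -> setSlice(value, region) with matched storage types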
2110
2111 void
2112 Data::typeMatchLeft(Data& right) const
2113 {
2114 if (right.isLazy() && !isLazy())
2115 {
2116 right.resolve();
2117 }
2118 if (isExpanded()){
2119 right.expand();
2120 } else if (isTagged()) {
2121 if (right.isConstant()) {
2122 right.tag();
2123 }
2124 }
2125 }
2126
2127 void
2128 Data::typeMatchRight(const Data& right)
2129 {
2130 if (isLazy() && !right.isLazy())
2131 {
2132 resolve();
2133 }
2134 if (isTagged()) {
2135 if (right.isExpanded()) {
2136 expand();
2137 }
2138 } else if (isConstant()) {
2139 if (right.isExpanded()) {
2140 expand();
2141 } else if (right.isTagged()) {
2142 tag();
2143 }
2144 }
2145 }
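
// Editorial sketch: typeMatchLeft/typeMatchRight promote operands up the
// storage hierarchy
//
//   DataConstant < DataTagged < DataExpanded
//
// (lazy operands are resolved first) so that setSlice only ever pairs like
// with like. A constant right-hand side is tagged when *this is tagged and
// expanded when *this is expanded; operands are never demoted.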
2146
2147 void
2148 Data::setTaggedValueByName(std::string name,
2149 const boost::python::object& value)
2150 {
2151 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2152 forceResolve();
2153 exclusiveWrite();
2154 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2155 setTaggedValue(tagKey,value);
2156 }
2157 }
2158
2159 void
2160 Data::setTaggedValue(int tagKey,
2161 const boost::python::object& value)
2162 {
2163 if (isProtected()) {
2164 throw DataException("Error - attempt to update protected Data object.");
2165 }
2166 //
2167 // Ensure underlying data object is of type DataTagged
2168 forceResolve();
2169 exclusiveWrite();
2170 if (isConstant()) tag();
2171 WrappedArray w(value);
2172
2173 DataVector temp_data2;
2174 temp_data2.copyFromArray(w,1);
2175
2176 m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
2177 }
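
// Editorial sketch: setTaggedValue unwraps the Python value via WrappedArray
// into a DataVector and stores it against the tag key. Typical use from
// Python (tag keys are whatever the domain defines):
//
//   d.setTaggedValue(1, 42.0)    # constant data is tag()ed automatically;
//                                # points carrying tag key 1 now read 42.0
//   # setTaggedValueByName("top", 42.0) works once the domain knows "top"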
2178
2179
2180 void
2181 Data::setTaggedValueFromCPP(int tagKey,
2182 const DataTypes::ShapeType& pointshape,
2183 const DataTypes::ValueType& value,
2184 int dataOffset)
2185 {
2186 if (isProtected()) {
2187 throw DataException("Error - attempt to update protected Data object.");
2188 }
2189 //
2190 // Ensure underlying data object is of type DataTagged
2191 forceResolve();
2192 if (isConstant()) tag();
2193 exclusiveWrite();
2194 //
2195 // Call DataAbstract::setTaggedValue
2196 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2197 }
2198
2199 int
2200 Data::getTagNumber(int dpno)
2201 {
2202 if (isEmpty())
2203 {
2204 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2205 }
2206 return getFunctionSpace().getTagFromDataPointNo(dpno);
2207 }
2208
2209
2210 ostream& escript::operator<<(ostream& o, const Data& data)
2211 {
2212 o << data.toString();
2213 return o;
2214 }
2215
2216 Data
2217 escript::C_GeneralTensorProduct(Data& arg_0,
2218 Data& arg_1,
2219 int axis_offset,
2220 int transpose)
2221 {
2222 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2223 // SM is the product of the last axis_offset entries in arg_0.getShape().
2224
2225 // deal with any lazy data
2226 // if (arg_0.isLazy()) {arg_0.resolve();}
2227 // if (arg_1.isLazy()) {arg_1.resolve();}
2228 if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
2229 {
2230 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2231 return Data(c);
2232 }
2233
2234 // Interpolate if necessary and find an appropriate function space
2235 Data arg_0_Z, arg_1_Z;
2236 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2237 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2238 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2239 arg_1_Z = Data(arg_1);
2240 }
2241 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2242 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2243 arg_0_Z =Data(arg_0);
2244 }
2245 else {
2246 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2247 }
2248 } else {
2249 arg_0_Z = Data(arg_0);
2250 arg_1_Z = Data(arg_1);
2251 }
2252 // Get rank and shape of inputs
2253 int rank0 = arg_0_Z.getDataPointRank();
2254 int rank1 = arg_1_Z.getDataPointRank();
2255 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2256 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2257
2258 // Prepare for the loops of the product and verify compatibility of shapes
2259 int start0=0, start1=0;
2260 if (transpose == 0) {}
2261 else if (transpose == 1) { start0 = axis_offset; }
2262 else if (transpose == 2) { start1 = rank1-axis_offset; }
2263 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2264
2265
2266 // Adjust the shapes for transpose
2267 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2268 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2269 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2270 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
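// Editorial illustration: the modular indexing above rotates each shape so
// the contracted axes end up where the product loops expect them. E.g. with
// rank0=3, axis_offset=1 and transpose=1 (start0=1), shape0=(a,b,c) becomes
// tmpShape0=(b,c,a): the leading axis of arg_0 moves into contracted position.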
2271
2272 #if 0
2273 // For debugging: show shape after transpose
2274 char tmp[100];
2275 std::string shapeStr;
2276 shapeStr = "(";
2277 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2278 shapeStr += ")";
2279 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2280 shapeStr = "(";
2281 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2282 shapeStr += ")";
2283 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2284 #endif
2285
2286 // Prepare for the loops of the product
2287 int SL=1, SM=1, SR=1;
2288 for (int i=0; i<rank0-axis_offset; i++) {
2289 SL *= tmpShape0[i];
2290 }
2291 for (int i=rank0-axis_offset; i<rank0; i++) {
2292 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2293 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2294 }
2295 SM *= tmpShape0[i];
2296 }
2297 for (int i=axis_offset; i<rank1; i++) {
2298 SR *= tmpShape1[i];
2299 }
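
// Editorial illustration: SL, SM and SR flatten the kept axes of arg_0, the
// contracted axes and the kept axes of arg_1 into one (SL x SM)*(SM x SR)
// matrix product. E.g. shape0=(2,3,4), shape1=(4,5), axis_offset=1 and
// transpose=0 give SL=2*3=6, SM=4, SR=5, and shape2 (built next) is (2,3,5).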
2300
2301 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2302 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2303 { // block to limit the scope of out_index
2304 int out_index=0;
2305 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2306 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2307 }
2308
2309 if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
2310 {
2311 ostringstream os;
2312 os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
2313 throw DataException(os.str());
2314 }
2315
2316 // Declare output Data object
2317 Data res;
2318
2319 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2320 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2321 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2322 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2323 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2324 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2325 }
2326 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2327
2328 // Prepare the DataConstant input
2329 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2330 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2331
2332 // Borrow DataTagged input from Data object
2333 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2334 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2335
2336 // Prepare a DataTagged output (operand 2)
2337 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2338 res.tag();
2339 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2340 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2341
2342 // Prepare offset into DataConstant
2343 int offset_0 = tmp_0->getPointOffset(0,0);
2344 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2345
2346 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2347 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2348
2349 // Compute a matrix product for the default value
2350 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2351 // Compute a matrix product for each tag
2352 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2353 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2354 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2355 tmp_2->addTag(i->first);
2356
2357 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2358 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2359
2360 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2361 }
2362
2363 }
2364 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2365
2366 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2367 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2368 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2369 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2370 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2371 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2372 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2373 int sampleNo_1,dataPointNo_1;
2374 int numSamples_1 = arg_1_Z.getNumSamples();
2375 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2376 int offset_0 = tmp_0->getPointOffset(0,0);
2377 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2378 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2379 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2380 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2381 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2382 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2383 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2384 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2385 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2386 }
2387 }
2388
2389 }
2390 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2391
2392 // Borrow DataTagged input from Data object
2393 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2394 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2395
2396 // Prepare the DataConstant input
2397 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2398 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2399
2400 // Prepare a DataTagged output (operand 2)
2401 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2402 res.tag();
2403 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2404 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2405
2406 // Prepare offset into DataConstant
2407 int offset_1 = tmp_1->getPointOffset(0,0);
2408 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2409 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2410 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2411
2412 // Compute a matrix product for the default value
2413 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2414 // Compute a matrix product for each tag
2415 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2416 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2417 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2418
2419 tmp_2->addTag(i->first);
2420 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2421 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2422 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2423 }
2424
2425 }
2426 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2427
2428 // Borrow DataTagged input from Data object
2429 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2430 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2431
2432 // Borrow DataTagged input from Data object
2433 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2434 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2435
2436 // Prepare a DataTagged output (operand 2)
2437 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2438 res.tag(); // DataTagged output
2439 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2440 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2441
2442 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2443 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2444 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2445
2446 // Compute a matrix product for the default value
2447 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2448 // Merge the tags
2449 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2450 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2451 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2452 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2453 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2454 }
2455 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2456 tmp_2->addTag(i->first);
2457 }
2458 // Compute a matrix product for each tag
2459 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2460 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2461 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2462 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2463 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2464
2465 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2466 }
2467
2468 }
2469 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2470
2471 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2472 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2473 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2474 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2475 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2476 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2477 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2478 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2479 int sampleNo_0,dataPointNo_0;
2480 int numSamples_0 = arg_0_Z.getNumSamples();
2481 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2482 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2483 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2484 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2485 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2486 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2487 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2488 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2489 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2490 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2491 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2492 }
2493 }
2494
2495 }
2496 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2497
2498 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2499 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2500 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2501 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2502 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2503 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2504 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2505 int sampleNo_0,dataPointNo_0;
2506 int numSamples_0 = arg_0_Z.getNumSamples();
2507 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2508 int offset_1 = tmp_1->getPointOffset(0,0);
2509 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2510 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2511 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2512 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2513 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2514 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2515 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2516 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2517 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2518 }
2519 }
2520
2521
2522 }
2523 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2524
2525 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2526 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2527 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2528 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2529 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2530 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2531 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2532 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2533 int sampleNo_0,dataPointNo_0;
2534 int numSamples_0 = arg_0_Z.getNumSamples();
2535 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2536 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2537 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2538 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2539 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2540 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2541 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2542 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2543 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2544 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2545 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2546 }
2547 }
2548
2549 }
2550 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2551
2552 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2553 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2554 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2555 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2556 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2557 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2558 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2559 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2560 int sampleNo_0,dataPointNo_0;
2561 int numSamples_0 = arg_0_Z.getNumSamples();
2562 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2563 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2564 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2565 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2566 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2567 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2568 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2569 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2570 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2571 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2572 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2573 }
2574 }
2575
2576 }
2577 else {
2578 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2579 }
2580
2581 return res;
2582 }
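
// Editorial sketch: a minimal call of C_GeneralTensorProduct, assuming some
// FunctionSpace fs is at hand. With axis_offset=1 and transpose=0 this is an
// ordinary per-data-point matrix-matrix product:
//
//   DataTypes::ShapeType sa, sb;
//   sa.push_back(3); sa.push_back(4);   // arg_0 points have shape (3,4)
//   sb.push_back(4); sb.push_back(5);   // arg_1 points have shape (4,5)
//   Data a(1.0, sa, fs), b(2.0, sb, fs);
//   Data r = C_GeneralTensorProduct(a, b, 1, 0);
//   // r points have shape (3,5); each entry is sum over SM=4 of 1.0*2.0 = 8.0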
2583
2584 DataAbstract*
2585 Data::borrowData() const
2586 {
2587 return m_data.get();
2588 }
2589
2590 // Not all that happy about returning a non-const pointer from a const method
2591 DataAbstract_ptr
2592 Data::borrowDataPtr() const
2593 {
2594 return m_data;
2595 }
2596
2597 // Not all that happy about returning a non-const pointer from a const method
2598 DataReady_ptr
2599 Data::borrowReadyPtr() const
2600 {
2601 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2602 EsysAssert((dr!=0), "Error - casting to DataReady.");
2603 return dr;
2604 }
2605
2606 std::string
2607 Data::toString() const
2608 {
2609 if (!m_data->isEmpty() &&
2610 !m_data->isLazy() &&
2611 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2612 {
2613 stringstream temp;
2614 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2615 return temp.str();
2616 }
2617 return m_data->toString();
2618 }
2619
2620
2621 // This method is not thread-safe
2622 DataTypes::ValueType::reference
2623 Data::getDataAtOffsetRW(DataTypes::ValueType::size_type i)
2624 {
2625 exclusiveWrite();
2626 return getReady()->getDataAtOffsetRW(i);
2627 }
2628
2629 // This method is not thread-safe
2630 DataTypes::ValueType::const_reference
2631 Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i)
2632 {
2633 forceResolve();
2634 return getReady()->getDataAtOffsetRO(i);
2635 }
2636
2637
2638 // DataTypes::ValueType::const_reference
2639 // Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i) const
2640 // {
2641 // if (isLazy())
2642 // {
2643 // throw DataException("Programmer error - getDataAtOffsetRO() not permitted on Lazy Data (object is const which prevents resolving).");
2644 // }
2645 // return getReady()->getDataAtOffsetRO(i);
2646 // }
2647
2648
2649 DataTypes::ValueType::const_reference
2650 Data::getDataPointRO(int sampleNo, int dataPointNo)
2651 {
2652 forceResolve();
2653 if (!isReady())
2654 {
2655 throw DataException("Programmer error -getDataPointRO() not permitted on Lazy Data.");
2656 }
2657 else
2658 {
2659 const DataReady* dr=getReady();
2660 return dr->getDataAtOffsetRO(dr->getPointOffset(sampleNo, dataPointNo));
2661 }
2662 }
2663
2664
2665
2666
2667 DataTypes::ValueType::reference
2668 Data::getDataPointRW(int sampleNo, int dataPointNo)
2669 {
2670 forceResolve();
2671 if (!isReady())
2672 {
2673 throw DataException("Programmer error - getDataPointRW() not permitted on Lazy Data.");
2674 }
2675 else
2676 {
2677 exclusiveWrite();
2678 DataReady* dr=getReady();
2679 return dr->getDataAtOffsetRW(dr->getPointOffset(sampleNo, dataPointNo));
2680 }
2681 }
2682
2683 DataTypes::ValueType*
2684 Data::allocSampleBuffer() const
2685 {
2686 if (isLazy())
2687 {
2688 return new DataTypes::ValueType(getSampleBufferSize());
2689 }
2690 else
2691 {
2692 return NULL;
2693 }
2694 }
2695
2696 void
2697 Data::freeSampleBuffer(DataTypes::ValueType* buffer)
2698 {
2699 if (buffer!=0)
2700 {
2701 delete buffer;
2702 }
2703 }
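
// Editorial sketch: the pair above supports sample-wise access to lazy data
// without resolving the whole object. Callers allocate once, reuse the buffer
// across samples, and free it afterwards; for non-lazy data
// allocSampleBuffer() returns NULL and freeSampleBuffer(NULL) is a no-op:
//
//   DataTypes::ValueType* buff = d.allocSampleBuffer();
//   // ... read samples, handing buff to the sample-access routines ...
//   d.freeSampleBuffer(buff);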
2704
2705
2706 /* Member functions specific to the MPI implementation */
2707
2708 void
2709 Data::print()
2710 {
2711 int i,j;
2712
2713 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2714 for( i=0; i<getNumSamples(); i++ )
2715 {
2716 printf( "[%6d]", i );
2717 for( j=0; j<getNumDataPointsPerSample(); j++ )
2718 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2719 printf( "\n" );
2720 }
2721 }
2722 void
2723 Data::dump(const std::string fileName) const
2724 {
2725 try
2726 {
2727 if (isLazy())
2728 {
2729 Data temp(*this); // this is to get a non-const object which we can resolve
2730 temp.resolve();
2731 temp.dump(fileName);
2732 }
2733 else
2734 {
2735 return m_data->dump(fileName);
2736 }
2737 }
2738 catch (std::exception& e)
2739 {
2740 cout << e.what() << endl;
2741 }
2742 }
2743
2744 int
2745 Data::get_MPISize() const
2746 {
2747 int size;
2748 #ifdef PASO_MPI
2749 int error;
2750 error = MPI_Comm_size( get_MPIComm(), &size );
2751 #else
2752 size = 1;
2753 #endif
2754 return size;
2755 }
2756
2757 int
2758 Data::get_MPIRank() const
2759 {
2760 int rank;
2761 #ifdef PASO_MPI
2762 int error;
2763 error = MPI_Comm_rank( get_MPIComm(), &rank );
2764 #else
2765 rank = 0;
2766 #endif
2767 return rank;
2768 }
2769
2770 MPI_Comm
2771 Data::get_MPIComm() const
2772 {
2773 #ifdef PASO_MPI
2774 return MPI_COMM_WORLD;
2775 #else
2776 return -1;
2777 #endif
2778 }
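
// Editorial sketch: these wrappers let callers write rank-guarded code that
// also compiles and runs in non-MPI builds, where get_MPIRank() is always 0
// and get_MPISize() is 1:
//
//   if (d.get_MPIRank() == 0) {
//       std::cout << d << std::endl;   // report once, not once per rank
//   }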
2779
2780
