/[escript]/branches/schroedinger/escript/src/Data.cpp

Revision 1886
Wed Oct 15 01:34:18 2008 UTC by jfenwick
File size: 76522 byte(s)
Branch commit.
Added unary ops up to pos.
toString now prints expression.
Added inlines to UnaryFuncs.h.

Still only supporting DataExpanded.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26
27 extern "C" {
28 #include "escript/blocktimer.h"
29 }
30
31 #include <fstream>
32 #include <algorithm>
33 #include <vector>
34 #include <functional>
35
36 #include <boost/python/dict.hpp>
37 #include <boost/python/extract.hpp>
38 #include <boost/python/long.hpp>
39
40 using namespace std;
41 using namespace boost::python;
42 using namespace boost;
43 using namespace escript;
44
45 Data::Data()
46 {
47 //
48 // Default data is type DataEmpty
49 DataAbstract* temp=new DataEmpty();
50 m_data=temp->getPtr();
51 m_protected=false;
52 }
53
54 Data::Data(double value,
55 const tuple& shape,
56 const FunctionSpace& what,
57 bool expanded)
58 {
59 DataTypes::ShapeType dataPointShape;
60 for (int i = 0; i < shape.attr("__len__")(); ++i) {
61 dataPointShape.push_back(extract<const int>(shape[i]));
62 }
63
64 int len = DataTypes::noValues(dataPointShape);
65 DataVector temp_data(len,value,len);
66 initialise(temp_data, dataPointShape, what, expanded);
67 m_protected=false;
68 }
69
70 Data::Data(double value,
71 const DataTypes::ShapeType& dataPointShape,
72 const FunctionSpace& what,
73 bool expanded)
74 {
75 int len = DataTypes::noValues(dataPointShape);
76
77 DataVector temp_data(len,value,len);
78 // DataArrayView temp_dataView(temp_data, dataPointShape);
79
80 // initialise(temp_dataView, what, expanded);
81 initialise(temp_data, dataPointShape, what, expanded);
82
83 m_protected=false;
84 }
85
86 Data::Data(const Data& inData)
87 {
88 m_data=inData.m_data;
89 m_protected=inData.isProtected();
90 }
91
92
93 Data::Data(const Data& inData,
94 const DataTypes::RegionType& region)
95 {
96 //
97 // Create Data which is a slice of another Data
98 DataAbstract* tmp = inData.m_data->getSlice(region);
99 m_data=DataAbstract_ptr(tmp);
100 m_protected=false;
101 }
102
103 Data::Data(const Data& inData,
104 const FunctionSpace& functionspace)
105 {
106 if (inData.isEmpty())
107 {
108 throw DataException("Error - will not interpolate for instances of DataEmpty.");
109 }
110 if (inData.isLazy())
111 {
112 throw DataException("Error - will not interpolate for instances of DataLazy - yet.");
113 }
114 if (inData.getFunctionSpace()==functionspace) {
115 m_data=inData.m_data;
116 }
117 else
118 { // this cast is safe for ref count because the class holds a reference
119 const DataReady* dr=dynamic_cast<const DataReady*>(inData.m_data.get());
120 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
121 if (!inData.probeInterpolation(functionspace))
122 { // Even though this is constant, we still need to check whether interpolation is allowed
123 throw FunctionSpaceException("Call to probeInterpolation returned false for DataConstant.");
124 }
125 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
126 m_data=DataAbstract_ptr(dc);
127 } else {
128 Data tmp(0,inData.getDataPointShape(),functionspace,true);
129 // Note: Must use a reference or pointer to a derived object
130 // in order to get polymorphic behaviour. Shouldn't really
131 // be able to create an instance of AbstractDomain but that was done
132 // as a boost::python workaround which may no longer be required.
133 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
134 const_Domain_ptr inDataDomain=inData.getDomain();
135 if (inDataDomain==functionspace.getDomain()) {
136 inDataDomain->interpolateOnDomain(tmp,inData);
137 } else {
138 inDataDomain->interpolateACross(tmp,inData);
139 }
140 m_data=tmp.m_data;
141 }
142 }
143 m_protected=false;
144 }
145
146 Data::Data(DataAbstract* underlyingdata)
147 {
148 // m_data=shared_ptr<DataAbstract>(underlyingdata);
149 m_data=underlyingdata->getPtr();
150 m_protected=false;
151 }
152
153 Data::Data(DataAbstract_ptr underlyingdata)
154 {
155 m_data=underlyingdata;
156 m_protected=false;
157 }
158
159
160 Data::Data(const numeric::array& value,
161 const FunctionSpace& what,
162 bool expanded)
163 {
164 initialise(value,what,expanded);
165 m_protected=false;
166 }
167 /*
168 Data::Data(const DataArrayView& value,
169 const FunctionSpace& what,
170 bool expanded)
171 {
172 initialise(value,what,expanded);
173 m_protected=false;
174 }*/
175
176 Data::Data(const DataTypes::ValueType& value,
177 const DataTypes::ShapeType& shape,
178 const FunctionSpace& what,
179 bool expanded)
180 {
181 initialise(value,shape,what,expanded);
182 m_protected=false;
183 }
184
185
186 Data::Data(const object& value,
187 const FunctionSpace& what,
188 bool expanded)
189 {
190 numeric::array asNumArray(value);
191 initialise(asNumArray,what,expanded);
192 m_protected=false;
193 }
194
195
196 Data::Data(const object& value,
197 const Data& other)
198 {
199 numeric::array asNumArray(value);
200
201 // extract the shape of the numarray
202 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
203 // /* for (int i=0; i < asNumArray.getrank(); i++) {
204 // tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
205 // }*/
206 // // get the space for the data vector
207 // int len = DataTypes::noValues(tempShape);
208 // DataVector temp_data(len, 0.0, len);
209 // /* DataArrayView temp_dataView(temp_data, tempShape);
210 // temp_dataView.copy(asNumArray);*/
211 // temp_data.copyFromNumArray(asNumArray);
212
213 //
214 // Create DataConstant using the given value and all other parameters
215 // copied from other. If value is a rank 0 object this Data
216 // will assume the point data shape of other.
217
218 if (DataTypes::getRank(tempShape)/*temp_dataView.getRank()*/==0) {
219
220
221 // get the space for the data vector
222 int len1 = DataTypes::noValues(tempShape);
223 DataVector temp_data(len1, 0.0, len1);
224 temp_data.copyFromNumArray(asNumArray);
225
226 int len = DataTypes::noValues(other.getDataPointShape());
227
228 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
229 //DataArrayView temp2_dataView(temp2_data, other.getPointDataView().getShape());
230 // initialise(temp2_dataView, other.getFunctionSpace(), false);
231
232 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
233 // boost::shared_ptr<DataAbstract> sp(t);
234 // m_data=sp;
235 m_data=DataAbstract_ptr(t);
236
237 } else {
238 //
239 // Create a DataConstant with the same sample shape as other
240 // initialise(temp_dataView, other.getFunctionSpace(), false);
241 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
242 // boost::shared_ptr<DataAbstract> sp(t);
243 // m_data=sp;
244 m_data=DataAbstract_ptr(t);
245 }
246 m_protected=false;
247 }
248
249 Data::~Data()
250 {
251
252 }
253
254
255
256 void
257 Data::initialise(const boost::python::numeric::array& value,
258 const FunctionSpace& what,
259 bool expanded)
260 {
261 //
262 // Construct a Data object of the appropriate type.
263 // Construct the object first as there seems to be a bug which causes
264 // undefined behaviour if an exception is thrown during construction
265 // within the shared_ptr constructor.
266 if (expanded) {
267 DataAbstract* temp=new DataExpanded(value, what);
268 // boost::shared_ptr<DataAbstract> temp_data(temp);
269 // m_data=temp_data;
270 m_data=temp->getPtr();
271 } else {
272 DataAbstract* temp=new DataConstant(value, what);
273 // boost::shared_ptr<DataAbstract> temp_data(temp);
274 // m_data=temp_data;
275 m_data=temp->getPtr();
276 }
277 }
278
279
280 void
281 Data::initialise(const DataTypes::ValueType& value,
282 const DataTypes::ShapeType& shape,
283 const FunctionSpace& what,
284 bool expanded)
285 {
286 //
287 // Construct a Data object of the appropriate type.
288 // Construct the object first as there seems to be a bug which causes
289 // undefined behaviour if an exception is thrown during construction
290 // within the shared_ptr constructor.
291 if (expanded) {
292 DataAbstract* temp=new DataExpanded(what, shape, value);
293 // boost::shared_ptr<DataAbstract> temp_data(temp);
294 // m_data=temp_data;
295 m_data=temp->getPtr();
296 } else {
297 DataAbstract* temp=new DataConstant(what, shape, value);
298 // boost::shared_ptr<DataAbstract> temp_data(temp);
299 // m_data=temp_data;
300 m_data=temp->getPtr();
301 }
302 }
303
304
305 // void
306 // Data::CompareDebug(const Data& rd)
307 // {
308 // using namespace std;
309 // bool mismatch=false;
310 // std::cout << "Comparing left and right" << endl;
311 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
312 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
313 //
314 // if (left==0)
315 // {
316 // cout << "left arg is not a DataTagged\n";
317 // return;
318 // }
319 //
320 // if (right==0)
321 // {
322 // cout << "right arg is not a DataTagged\n";
323 // return;
324 // }
325 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
326 // cout << "Shapes ";
327 // if (left->getShape()==right->getShape())
328 // {
329 // cout << "ok\n";
330 // }
331 // else
332 // {
333 // cout << "Problem: shapes do not match\n";
334 // mismatch=true;
335 // }
336 // int lim=left->getVector().size();
337 // if (right->getVector().size()) lim=right->getVector().size();
338 // for (int i=0;i<lim;++i)
339 // {
340 // if (left->getVector()[i]!=right->getVector()[i])
341 // {
342 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
343 // mismatch=true;
344 // }
345 // }
346 //
347 // // still need to check the tag map
348 // // also need to watch what is happening to function spaces, are they copied or what?
349 //
350 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
351 // const DataTagged::DataMapType& mapright=right->getTagLookup();
352 //
353 // if (mapleft.size()!=mapright.size())
354 // {
355 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
356 // mismatch=true;
357 // cout << "Left map\n";
358 // DataTagged::DataMapType::const_iterator i,j;
359 // for (i=mapleft.begin();i!=mapleft.end();++i) {
360 // cout << "(" << i->first << "=>" << i->second << ")\n";
361 // }
362 // cout << "Right map\n";
363 // for (i=mapright.begin();i!=mapright.end();++i) {
364 // cout << "(" << i->first << "=>" << i->second << ")\n";
365 // }
366 // cout << "End map\n";
367 //
368 // }
369 //
370 // DataTagged::DataMapType::const_iterator i,j;
371 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
372 // if ((i->first!=j->first) || (i->second!=j->second))
373 // {
374 // cout << "(" << i->first << "=>" << i->second << ")";
375 // cout << ":(" << j->first << "=>" << j->second << ") ";
376 // mismatch=true;
377 // }
378 // }
379 // if (mismatch)
380 // {
381 // cout << "#Mismatch\n";
382 // }
383 // }
384
385 escriptDataC
386 Data::getDataC()
387 {
388 escriptDataC temp;
389 temp.m_dataPtr=(void*)this;
390 return temp;
391 }
392
393 escriptDataC
394 Data::getDataC() const
395 {
396 escriptDataC temp;
397 temp.m_dataPtr=(void*)this;
398 return temp;
399 }
400
401 const boost::python::tuple
402 Data::getShapeTuple() const
403 {
404 const DataTypes::ShapeType& shape=getDataPointShape();
405 switch(getDataPointRank()) {
406 case 0:
407 return make_tuple();
408 case 1:
409 return make_tuple(long_(shape[0]));
410 case 2:
411 return make_tuple(long_(shape[0]),long_(shape[1]));
412 case 3:
413 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
414 case 4:
415 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
416 default:
417 throw DataException("Error - illegal Data rank.");
418 }
419 }
420
421
422 // The different name is needed because boost has trouble with overloaded functions.
423 // It can't work out what type the function is based solely on its name.
424 // There are ways to fix this involving creating function pointer variables for each form,
425 // but there doesn't seem to be a need, given that the methods have the same name from the Python point of view.
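// A minimal sketch (not the actual binding code) of the function-pointer-variable
// approach mentioned above, as it might appear in the boost::python wrapper;
// the exposed name "getDataC" is purely illustrative of the technique:
//
//   // pick the non-const overload by assigning it to an explicitly typed
//   // member-function pointer, then register that pointer with boost::python
//   escriptDataC (Data::*getDataC_nonconst)() = &Data::getDataC;
//   boost::python::class_<Data>("Data")
//       .def("getDataC", getDataC_nonconst);
//
// Keeping distinct C++ names (copySelf vs. copy) sidesteps the issue entirely.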
426 Data*
427 Data::copySelf()
428 {
429 DataAbstract* temp=m_data->deepCopy();
430 return new Data(temp);
431 }
432
433 void
434 Data::copy(const Data& other)
435 {
436 DataAbstract* temp=other.m_data->deepCopy();
437 DataAbstract_ptr p=temp->getPtr();
438 m_data=p;
439 }
440
441
442 Data
443 Data::delay()
444 {
445 DataLazy* dl=new DataLazy(m_data);
446 return Data(dl);
447 }
448
449 void
450 Data::setToZero()
451 {
452 if (isEmpty())
453 {
454 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
455 }
456 {
457 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
458 if (temp!=0) {
459 temp->setToZero();
460 return;
461 }
462 }
463 {
464 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
465 if (temp!=0) {
466 temp->setToZero();
467 return;
468 }
469 }
470 {
471 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
472 if (temp!=0) {
473 temp->setToZero();
474 return;
475 }
476 }
477 throw DataException("Error - Data can not be set to zero.");
478 }
479
480 void
481 Data::copyWithMask(const Data& other,
482 const Data& mask)
483 {
484 if (other.isEmpty() || mask.isEmpty())
485 {
486 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
487 }
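// The masked copy is built from arithmetic on whole Data objects:
//   m      = wherePositive(mask)        (1 where mask > 0, else 0)
//   result = other*m + (*this)*(1 - m)
// mask1 below ends up holding other*m and mask2 holding (*this)*(1 - m),
// so their sum selects, per data point, between 'other' and the current value.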
488 Data mask1;
489 Data mask2;
490 mask1 = mask.wherePositive();
491
492 mask2.copy(mask1);
493 mask1 *= other;
494
495 mask2 *= *this;
496 mask2 = *this - mask2;
497 *this = mask1 + mask2;
498 }
499
500 bool
501 Data::isExpanded() const
502 {
503 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
504 return (temp!=0);
505 }
506
507 bool
508 Data::isTagged() const
509 {
510 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
511 return (temp!=0);
512 }
513
514 bool
515 Data::isEmpty() const
516 {
517 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
518 return (temp!=0);
519 }
520
521 bool
522 Data::isConstant() const
523 {
524 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
525 return (temp!=0);
526 }
527
528 bool
529 Data::isLazy() const
530 {
531 return m_data->isLazy();
532 }
533
534 // at the moment this is synonymous with !isLazy() but that could change
535 bool
536 Data::isReady() const
537 {
538 return (dynamic_cast<DataReady*>(m_data.get())!=0);
539 }
540
541
542 void
543 Data::setProtection()
544 {
545 m_protected=true;
546 }
547
548 bool
549 Data::isProtected() const
550 {
551 return m_protected;
552 }
553
554
555
556 void
557 Data::expand()
558 {
559 if (isConstant()) {
560 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
561 DataAbstract* temp=new DataExpanded(*tempDataConst);
562 // shared_ptr<DataAbstract> temp_data(temp);
563 // m_data=temp_data;
564 m_data=temp->getPtr();
565 } else if (isTagged()) {
566 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
567 DataAbstract* temp=new DataExpanded(*tempDataTag);
568 // shared_ptr<DataAbstract> temp_data(temp);
569 // m_data=temp_data;
570 m_data=temp->getPtr();
571 } else if (isExpanded()) {
572 //
573 // do nothing
574 } else if (isEmpty()) {
575 throw DataException("Error - Expansion of DataEmpty not possible.");
576 } else if (isLazy()) {
577 DataReady_ptr res=m_data->resolve();
578 m_data=res;
579 } else {
580 throw DataException("Error - Expansion not implemented for this Data type.");
581 }
582 }
583
584 void
585 Data::tag()
586 {
587 if (isConstant()) {
588 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
589 DataAbstract* temp=new DataTagged(*tempDataConst);
590 // shared_ptr<DataAbstract> temp_data(temp);
591 // m_data=temp_data;
592 m_data=temp->getPtr();
593 } else if (isTagged()) {
594 // do nothing
595 } else if (isExpanded()) {
596 throw DataException("Error - Creating tag data from DataExpanded not possible.");
597 } else if (isEmpty()) {
598 throw DataException("Error - Creating tag data from DataEmpty not possible.");
599 } else {
600 throw DataException("Error - Tagging not implemented for this Data type.");
601 }
602 }
603
604 Data
605 Data::oneOver() const
606 {
607 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
608 }
609
610 Data
611 Data::wherePositive() const
612 {
613 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
614 }
615
616 Data
617 Data::whereNegative() const
618 {
619 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
620 }
621
622 Data
623 Data::whereNonNegative() const
624 {
625 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
626 }
627
628 Data
629 Data::whereNonPositive() const
630 {
631 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
632 }
633
634 Data
635 Data::whereZero(double tol) const
636 {
637 Data dataAbs=abs();
638 return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
639 }
640
641 Data
642 Data::whereNonZero(double tol) const
643 {
644 Data dataAbs=abs();
645 return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
646 }
647
648 Data
649 Data::interpolate(const FunctionSpace& functionspace) const
650 {
651 return Data(*this,functionspace);
652 }
653
654 bool
655 Data::probeInterpolation(const FunctionSpace& functionspace) const
656 {
657 if (getFunctionSpace()==functionspace) {
658 return true;
659 } else {
660 const_Domain_ptr domain=getDomain();
661 if (*domain==*functionspace.getDomain()) {
662 return domain->probeInterpolationOnDomain(getFunctionSpace().getTypeCode(),functionspace.getTypeCode());
663 } else {
664 return domain->probeInterpolationACross(getFunctionSpace().getTypeCode(),*(functionspace.getDomain()),functionspace.getTypeCode());
665 }
666 }
667 }
668
669 Data
670 Data::gradOn(const FunctionSpace& functionspace) const
671 {
672 if (isEmpty())
673 {
674 throw DataException("Error - operation not permitted on instances of DataEmpty.");
675 }
676 double blocktimer_start = blocktimer_time();
677 if (functionspace.getDomain()!=getDomain())
678 throw DataException("Error - gradient cannot be calculated on different domains.");
679 DataTypes::ShapeType grad_shape=getDataPointShape();
680 grad_shape.push_back(functionspace.getDim());
681 Data out(0.0,grad_shape,functionspace,true);
682 getDomain()->setToGradient(out,*this);
683 blocktimer_increment("grad()", blocktimer_start);
684 return out;
685 }
686
687 Data
688 Data::grad() const
689 {
690 if (isEmpty())
691 {
692 throw DataException("Error - operation not permitted on instances of DataEmpty.");
693 }
694 return gradOn(escript::function(*getDomain()));
695 }
696
697 int
698 Data::getDataPointSize() const
699 {
700 return m_data->getNoValues();
701 }
702
703 DataTypes::ValueType::size_type
704 Data::getLength() const
705 {
706 return m_data->getLength();
707 }
708
709 const
710 boost::python::numeric::array
711 Data:: getValueOfDataPoint(int dataPointNo)
712 {
713 size_t length=0;
714 int i, j, k, l;
715 //
716 // determine the rank and shape of each data point
717 int dataPointRank = getDataPointRank();
718 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
719
720 //
721 // create the numeric array to be returned
722 boost::python::numeric::array numArray(0.0);
723
724 //
725 // the shape of the returned numeric array will be the same
726 // as that of the data point
727 int arrayRank = dataPointRank;
728 const DataTypes::ShapeType& arrayShape = dataPointShape;
729
730 //
731 // resize the numeric array to the shape just calculated
732 if (arrayRank==0) {
733 numArray.resize(1);
734 }
735 if (arrayRank==1) {
736 numArray.resize(arrayShape[0]);
737 }
738 if (arrayRank==2) {
739 numArray.resize(arrayShape[0],arrayShape[1]);
740 }
741 if (arrayRank==3) {
742 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
743 }
744 if (arrayRank==4) {
745 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
746 }
747
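// The flat data point number is split into (sampleNo, dataPointNoInSample) by
// integer division and remainder; e.g. with 4 points per sample, dataPointNo 10
// gives sampleNo 2 and dataPointNoInSample 2.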
748 if (getNumDataPointsPerSample()>0) {
749 int sampleNo = dataPointNo/getNumDataPointsPerSample();
750 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
751 //
752 // Check a valid sample number has been supplied
753 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
754 throw DataException("Error - Data::getValueOfDataPoint: invalid sampleNo.");
755 }
756
757 //
758 // Check a valid data point number has been supplied
759 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
760 throw DataException("Error - Data::getValueOfDataPoint: invalid dataPointNoInSample.");
761 }
762 // TODO: global error handling
763 // create a view of the data if it is stored locally
764 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
765 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
766
767
768 switch( dataPointRank ){
769 case 0 :
770 numArray[0] = getDataAtOffset(offset);
771 break;
772 case 1 :
773 for( i=0; i<dataPointShape[0]; i++ )
774 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
775 break;
776 case 2 :
777 for( i=0; i<dataPointShape[0]; i++ )
778 for( j=0; j<dataPointShape[1]; j++)
779 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
780 break;
781 case 3 :
782 for( i=0; i<dataPointShape[0]; i++ )
783 for( j=0; j<dataPointShape[1]; j++ )
784 for( k=0; k<dataPointShape[2]; k++)
785 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
786 break;
787 case 4 :
788 for( i=0; i<dataPointShape[0]; i++ )
789 for( j=0; j<dataPointShape[1]; j++ )
790 for( k=0; k<dataPointShape[2]; k++ )
791 for( l=0; l<dataPointShape[3]; l++)
792 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
793 break;
794 }
795 }
796 //
797 // return the array
798 return numArray;
799
800 }
801
802 void
803 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
804 {
805 // this will throw if the value cannot be represented
806 boost::python::numeric::array num_array(py_object);
807 setValueOfDataPointToArray(dataPointNo,num_array);
808
809
810 }
811
812 void
813 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
814 {
815 if (isProtected()) {
816 throw DataException("Error - attempt to update protected Data object.");
817 }
818 //
819 // check rank
820 if (num_array.getrank()<getDataPointRank())
821 throw DataException("Rank of numarray does not match Data object rank");
822
823 //
824 // check shape of num_array
825 for (int i=0; i<getDataPointRank(); i++) {
826 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
827 throw DataException("Shape of numarray does not match Data object rank");
828 }
829 //
830 // make sure data is expanded:
831 if (!isExpanded()) {
832 expand();
833 }
834 if (getNumDataPointsPerSample()>0) {
835 int sampleNo = dataPointNo/getNumDataPointsPerSample();
836 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
837 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
838 } else {
839 m_data->copyToDataPoint(-1, 0,num_array);
840 }
841 }
842
843 void
844 Data::setValueOfDataPoint(int dataPointNo, const double value)
845 {
846 if (isProtected()) {
847 throw DataException("Error - attempt to update protected Data object.");
848 }
849 //
850 // make sure data is expanded:
851 if (!isExpanded()) {
852 expand();
853 }
854 if (getNumDataPointsPerSample()>0) {
855 int sampleNo = dataPointNo/getNumDataPointsPerSample();
856 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
857 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
858 } else {
859 m_data->copyToDataPoint(-1, 0,value);
860 }
861 }
862
863 const
864 boost::python::numeric::array
865 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
866 {
867 size_t length=0;
868 int i, j, k, l, pos;
869 //
870 // determine the rank and shape of each data point
871 int dataPointRank = getDataPointRank();
872 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
873
874 //
875 // create the numeric array to be returned
876 boost::python::numeric::array numArray(0.0);
877
878 //
879 // the shape of the returned numeric array will be the same
880 // as that of the data point
881 int arrayRank = dataPointRank;
882 const DataTypes::ShapeType& arrayShape = dataPointShape;
883
884 //
885 // resize the numeric array to the shape just calculated
886 if (arrayRank==0) {
887 numArray.resize(1);
888 }
889 if (arrayRank==1) {
890 numArray.resize(arrayShape[0]);
891 }
892 if (arrayRank==2) {
893 numArray.resize(arrayShape[0],arrayShape[1]);
894 }
895 if (arrayRank==3) {
896 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
897 }
898 if (arrayRank==4) {
899 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
900 }
901
902 // added for the MPI communication
903 length=1;
904 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
905 double *tmpData = new double[length];
906
907 //
908 // load the values for the data point into the numeric array.
909
910 // updated for the MPI case
911 if( get_MPIRank()==procNo ){
912 if (getNumDataPointsPerSample()>0) {
913 int sampleNo = dataPointNo/getNumDataPointsPerSample();
914 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
915 //
916 // Check a valid sample number has been supplied
917 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
918 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid sampleNo.");
919 }
920
921 //
922 // Check a valid data point number has been supplied
923 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
924 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid dataPointNoInSample.");
925 }
926 // TODO: global error handling
927 // create a view of the data if it is stored locally
928 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
929 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
930
931 // pack the data from the view into tmpData for MPI communication
932 pos=0;
933 switch( dataPointRank ){
934 case 0 :
935 tmpData[0] = getDataAtOffset(offset);
936 break;
937 case 1 :
938 for( i=0; i<dataPointShape[0]; i++ )
939 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
940 break;
941 case 2 :
942 for( i=0; i<dataPointShape[0]; i++ )
943 for( j=0; j<dataPointShape[1]; j++, pos++ )
944 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
945 break;
946 case 3 :
947 for( i=0; i<dataPointShape[0]; i++ )
948 for( j=0; j<dataPointShape[1]; j++ )
949 for( k=0; k<dataPointShape[2]; k++, pos++ )
950 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
951 break;
952 case 4 :
953 for( i=0; i<dataPointShape[0]; i++ )
954 for( j=0; j<dataPointShape[1]; j++ )
955 for( k=0; k<dataPointShape[2]; k++ )
956 for( l=0; l<dataPointShape[3]; l++, pos++ )
957 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
958 break;
959 }
960 }
961 }
962 #ifdef PASO_MPI
963 // broadcast the data to all other processes
964 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
965 #endif
966
967 // unpack the data
968 switch( dataPointRank ){
969 case 0 :
970 numArray[0]=tmpData[0];
971 break;
972 case 1 :
973 for( i=0; i<dataPointShape[0]; i++ )
974 numArray[i]=tmpData[i];
975 break;
976 case 2 :
977 for( i=0; i<dataPointShape[0]; i++ )
978 for( j=0; j<dataPointShape[1]; j++ )
979 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
980 break;
981 case 3 :
982 for( i=0; i<dataPointShape[0]; i++ )
983 for( j=0; j<dataPointShape[1]; j++ )
984 for( k=0; k<dataPointShape[2]; k++ )
985 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
986 break;
987 case 4 :
988 for( i=0; i<dataPointShape[0]; i++ )
989 for( j=0; j<dataPointShape[1]; j++ )
990 for( k=0; k<dataPointShape[2]; k++ )
991 for( l=0; l<dataPointShape[3]; l++ )
992 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
993 break;
994 }
995
996 delete [] tmpData;
997 //
998 // return the loaded array
999 return numArray;
1000 }
1001
1002
1003
1004 boost::python::numeric::array
1005 Data::integrate() const
1006 {
1007 int index;
1008 int rank = getDataPointRank();
1009 DataTypes::ShapeType shape = getDataPointShape();
1010 int dataPointSize = getDataPointSize();
1011
1012 //
1013 // calculate the integral values
1014 vector<double> integrals(dataPointSize);
1015 vector<double> integrals_local(dataPointSize);
1016 #ifdef PASO_MPI
1017 AbstractContinuousDomain::asAbstractContinuousDomain(getDomain()).setToIntegrals(integrals_local,*this);
1018 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1019 double *tmp = new double[dataPointSize];
1020 double *tmp_local = new double[dataPointSize];
1021 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1022 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1023 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1024 delete[] tmp;
1025 delete[] tmp_local;
1026 #else
1027 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals,*this);
1028 #endif
1029
1030 //
1031 // create the numeric array to be returned
1032 // and load the array with the integral values
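// The integrals vector is addressed with the usual column-major linearisation
// index = i + shape[0]*(j + shape[1]*(k + shape[2]*l)); for example, with
// shape (2,3) the component (i=1, j=2) sits at index 1 + 2*2 = 5.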
1033 boost::python::numeric::array bp_array(1.0);
1034 if (rank==0) {
1035 bp_array.resize(1);
1036 index = 0;
1037 bp_array[0] = integrals[index];
1038 }
1039 if (rank==1) {
1040 bp_array.resize(shape[0]);
1041 for (int i=0; i<shape[0]; i++) {
1042 index = i;
1043 bp_array[i] = integrals[index];
1044 }
1045 }
1046 if (rank==2) {
1047 bp_array.resize(shape[0],shape[1]);
1048 for (int i=0; i<shape[0]; i++) {
1049 for (int j=0; j<shape[1]; j++) {
1050 index = i + shape[0] * j;
1051 bp_array[make_tuple(i,j)] = integrals[index];
1052 }
1053 }
1054 }
1055 if (rank==3) {
1056 bp_array.resize(shape[0],shape[1],shape[2]);
1057 for (int i=0; i<shape[0]; i++) {
1058 for (int j=0; j<shape[1]; j++) {
1059 for (int k=0; k<shape[2]; k++) {
1060 index = i + shape[0] * ( j + shape[1] * k );
1061 bp_array[make_tuple(i,j,k)] = integrals[index];
1062 }
1063 }
1064 }
1065 }
1066 if (rank==4) {
1067 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1068 for (int i=0; i<shape[0]; i++) {
1069 for (int j=0; j<shape[1]; j++) {
1070 for (int k=0; k<shape[2]; k++) {
1071 for (int l=0; l<shape[3]; l++) {
1072 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1073 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1074 }
1075 }
1076 }
1077 }
1078 }
1079
1080 //
1081 // return the loaded array
1082 return bp_array;
1083 }
1084
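// The unary operations below share one pattern: if the underlying data is lazy,
// wrap it in a new DataLazy node tagged with the operation code (SIN, COS, ...)
// so evaluation is deferred; otherwise apply the operation eagerly through
// C_TensorUnaryOperation with the corresponding C library function.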
1085 Data
1086 Data::sin() const
1087 {
1088 if (isLazy())
1089 {
1090 DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
1091 return Data(c);
1092 }
1093 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1094 }
1095
1096 Data
1097 Data::cos() const
1098 {
1099 if (isLazy())
1100 {
1101 DataLazy* c=new DataLazy(borrowDataPtr(),COS);
1102 return Data(c);
1103 }
1104 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1105 }
1106
1107 Data
1108 Data::tan() const
1109 {
1110 if (isLazy())
1111 {
1112 DataLazy* c=new DataLazy(borrowDataPtr(),TAN);
1113 return Data(c);
1114 }
1115 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1116 }
1117
1118 Data
1119 Data::asin() const
1120 {
1121 if (isLazy())
1122 {
1123 DataLazy* c=new DataLazy(borrowDataPtr(),ASIN);
1124 return Data(c);
1125 }
1126 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1127 }
1128
1129 Data
1130 Data::acos() const
1131 {
1132 if (isLazy())
1133 {
1134 DataLazy* c=new DataLazy(borrowDataPtr(),ACOS);
1135 return Data(c);
1136 }
1137 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1138 }
1139
1140
1141 Data
1142 Data::atan() const
1143 {
1144 if (isLazy())
1145 {
1146 DataLazy* c=new DataLazy(borrowDataPtr(),ATAN);
1147 return Data(c);
1148 }
1149 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1150 }
1151
1152 Data
1153 Data::sinh() const
1154 {
1155 if (isLazy())
1156 {
1157 DataLazy* c=new DataLazy(borrowDataPtr(),SINH);
1158 return Data(c);
1159 }
1160 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1161 }
1162
1163 Data
1164 Data::cosh() const
1165 {
1166 if (isLazy())
1167 {
1168 DataLazy* c=new DataLazy(borrowDataPtr(),COSH);
1169 return Data(c);
1170 }
1171 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1172 }
1173
1174 Data
1175 Data::tanh() const
1176 {
1177 if (isLazy())
1178 {
1179 DataLazy* c=new DataLazy(borrowDataPtr(),TANH);
1180 return Data(c);
1181 }
1182 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1183 }
1184
1185
1186 Data
1187 Data::erf() const
1188 {
1189 #ifdef _WIN32
1190 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1191 #else
1192 if (isLazy())
1193 {
1194 DataLazy* c=new DataLazy(borrowDataPtr(),ERF);
1195 return Data(c);
1196 }
1197 return C_TensorUnaryOperation(*this, ::erf);
1198 #endif
1199 }
1200
1201 Data
1202 Data::asinh() const
1203 {
1204 if (isLazy())
1205 {
1206 DataLazy* c=new DataLazy(borrowDataPtr(),ASINH);
1207 return Data(c);
1208 }
1209 #ifdef _WIN32
1210 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1211 #else
1212 return C_TensorUnaryOperation(*this, ::asinh);
1213 #endif
1214 }
1215
1216 Data
1217 Data::acosh() const
1218 {
1219 if (isLazy())
1220 {
1221 DataLazy* c=new DataLazy(borrowDataPtr(),ACOSH);
1222 return Data(c);
1223 }
1224 #ifdef _WIN32
1225 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1226 #else
1227 return C_TensorUnaryOperation(*this, ::acosh);
1228 #endif
1229 }
1230
1231 Data
1232 Data::atanh() const
1233 {
1234 if (isLazy())
1235 {
1236 DataLazy* c=new DataLazy(borrowDataPtr(),ATANH);
1237 return Data(c);
1238 }
1239 #ifdef _WIN32
1240 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1241 #else
1242 return C_TensorUnaryOperation(*this, ::atanh);
1243 #endif
1244 }
1245
1246 Data
1247 Data::log10() const
1248 { if (isLazy())
1249 {
1250 DataLazy* c=new DataLazy(borrowDataPtr(),LOG10);
1251 return Data(c);
1252 }
1253 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1254 }
1255
1256 Data
1257 Data::log() const
1258 {
1259 if (isLazy())
1260 {
1261 DataLazy* c=new DataLazy(borrowDataPtr(),LOG);
1262 return Data(c);
1263 }
1264 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1265 }
1266
1267 Data
1268 Data::sign() const
1269 {
1270 if (isLazy())
1271 {
1272 DataLazy* c=new DataLazy(borrowDataPtr(),SIGN);
1273 return Data(c);
1274 }
1275 return C_TensorUnaryOperation(*this, escript::fsign);
1276 }
1277
1278 Data
1279 Data::abs() const
1280 {
1281 if (isLazy())
1282 {
1283 DataLazy* c=new DataLazy(borrowDataPtr(),ABS);
1284 return Data(c);
1285 }
1286 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1287 }
1288
1289 Data
1290 Data::neg() const
1291 {
1292 if (isLazy())
1293 {
1294 DataLazy* c=new DataLazy(borrowDataPtr(),NEG);
1295 return Data(c);
1296 }
1297 return C_TensorUnaryOperation(*this, negate<double>());
1298 }
1299
1300 Data
1301 Data::pos() const
1302 {
1303 // not doing lazy check here is deliberate.
1304 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1305 Data result;
1306 // perform a deep copy
1307 result.copy(*this);
1308 return result;
1309 }
1310
1311 Data
1312 Data::exp() const
1313 {
1314 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1315 }
1316
1317 Data
1318 Data::sqrt() const
1319 {
1320 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1321 }
1322
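// The reductions below (Lsup, sup, inf) first reduce over the locally held data
// points via algorithm(...); when compiled with PASO_MPI the per-process results
// are then combined with MPI_Allreduce using MPI_MAX or MPI_MIN as appropriate.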
1323 double
1324 Data::Lsup() const
1325 {
1326 double localValue;
1327 //
1328 // set the initial absolute maximum value to zero
1329
1330 AbsMax abs_max_func;
1331 localValue = algorithm(abs_max_func,0);
1332 #ifdef PASO_MPI
1333 double globalValue;
1334 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1335 return globalValue;
1336 #else
1337 return localValue;
1338 #endif
1339 }
1340
1341 double
1342 Data::sup() const
1343 {
1344 double localValue;
1345 //
1346 // set the initial maximum value to min possible double
1347 FMax fmax_func;
1348 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1349 #ifdef PASO_MPI
1350 double globalValue;
1351 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1352 return globalValue;
1353 #else
1354 return localValue;
1355 #endif
1356 }
1357
1358 double
1359 Data::inf() const
1360 {
1361 double localValue;
1362 //
1363 // set the initial minimum value to max possible double
1364 FMin fmin_func;
1365 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1366 #ifdef PASO_MPI
1367 double globalValue;
1368 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1369 return globalValue;
1370 #else
1371 return localValue;
1372 #endif
1373 }
1374
1375 /* TODO */
1376 /* global reduction */
1377 Data
1378 Data::maxval() const
1379 {
1380 //
1381 // set the initial maximum value to min possible double
1382 FMax fmax_func;
1383 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1384 }
1385
1386 Data
1387 Data::minval() const
1388 {
1389 //
1390 // set the initial minimum value to max possible double
1391 FMin fmin_func;
1392 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1393 }
1394
1395 Data
1396 Data::swapaxes(const int axis0, const int axis1) const
1397 {
1398 int axis0_tmp,axis1_tmp;
1399 DataTypes::ShapeType s=getDataPointShape();
1400 DataTypes::ShapeType ev_shape;
1401 // Build the output shape: identical to the input shape except that the
1402 // entries at positions axis0 and axis1 are swapped.
1403 int rank=getDataPointRank();
1404 if (rank<2) {
1405 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1406 }
1407 if (axis0<0 || axis0>rank-1) {
1408 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
1409 }
1410 if (axis1<0 || axis1>rank-1) {
1411 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1412 }
1413 if (axis0 == axis1) {
1414 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1415 }
1416 if (axis0 > axis1) {
1417 axis0_tmp=axis1;
1418 axis1_tmp=axis0;
1419 } else {
1420 axis0_tmp=axis0;
1421 axis1_tmp=axis1;
1422 }
1423 for (int i=0; i<rank; i++) {
1424 if (i == axis0_tmp) {
1425 ev_shape.push_back(s[axis1_tmp]);
1426 } else if (i == axis1_tmp) {
1427 ev_shape.push_back(s[axis0_tmp]);
1428 } else {
1429 ev_shape.push_back(s[i]);
1430 }
1431 }
1432 Data ev(0.,ev_shape,getFunctionSpace());
1433 ev.typeMatchRight(*this);
1434 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1435 return ev;
1436
1437 }
1438
1439 Data
1440 Data::symmetric() const
1441 {
1442 // check input
1443 DataTypes::ShapeType s=getDataPointShape();
1444 if (getDataPointRank()==2) {
1445 if(s[0] != s[1])
1446 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1447 }
1448 else if (getDataPointRank()==4) {
1449 if(!(s[0] == s[2] && s[1] == s[3]))
1450 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1451 }
1452 else {
1453 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1454 }
1455 Data ev(0.,getDataPointShape(),getFunctionSpace());
1456 ev.typeMatchRight(*this);
1457 m_data->symmetric(ev.m_data.get());
1458 return ev;
1459 }
1460
1461 Data
1462 Data::nonsymmetric() const
1463 {
1464 // check input
1465 DataTypes::ShapeType s=getDataPointShape();
1466 if (getDataPointRank()==2) {
1467 if(s[0] != s[1])
1468 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1469 DataTypes::ShapeType ev_shape;
1470 ev_shape.push_back(s[0]);
1471 ev_shape.push_back(s[1]);
1472 Data ev(0.,ev_shape,getFunctionSpace());
1473 ev.typeMatchRight(*this);
1474 m_data->nonsymmetric(ev.m_data.get());
1475 return ev;
1476 }
1477 else if (getDataPointRank()==4) {
1478 if(!(s[0] == s[2] && s[1] == s[3]))
1479 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1480 DataTypes::ShapeType ev_shape;
1481 ev_shape.push_back(s[0]);
1482 ev_shape.push_back(s[1]);
1483 ev_shape.push_back(s[2]);
1484 ev_shape.push_back(s[3]);
1485 Data ev(0.,ev_shape,getFunctionSpace());
1486 ev.typeMatchRight(*this);
1487 m_data->nonsymmetric(ev.m_data.get());
1488 return ev;
1489 }
1490 else {
1491 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1492 }
1493 }
1494
1495 Data
1496 Data::trace(int axis_offset) const
1497 {
1498 DataTypes::ShapeType s=getDataPointShape();
1499 if (getDataPointRank()==2) {
1500 DataTypes::ShapeType ev_shape;
1501 Data ev(0.,ev_shape,getFunctionSpace());
1502 ev.typeMatchRight(*this);
1503 m_data->trace(ev.m_data.get(), axis_offset);
1504 return ev;
1505 }
1506 if (getDataPointRank()==3) {
1507 DataTypes::ShapeType ev_shape;
1508 if (axis_offset==0) {
1509 int s2=s[2];
1510 ev_shape.push_back(s2);
1511 }
1512 else if (axis_offset==1) {
1513 int s0=s[0];
1514 ev_shape.push_back(s0);
1515 }
1516 Data ev(0.,ev_shape,getFunctionSpace());
1517 ev.typeMatchRight(*this);
1518 m_data->trace(ev.m_data.get(), axis_offset);
1519 return ev;
1520 }
1521 if (getDataPointRank()==4) {
1522 DataTypes::ShapeType ev_shape;
1523 if (axis_offset==0) {
1524 ev_shape.push_back(s[2]);
1525 ev_shape.push_back(s[3]);
1526 }
1527 else if (axis_offset==1) {
1528 ev_shape.push_back(s[0]);
1529 ev_shape.push_back(s[3]);
1530 }
1531 else if (axis_offset==2) {
1532 ev_shape.push_back(s[0]);
1533 ev_shape.push_back(s[1]);
1534 }
1535 Data ev(0.,ev_shape,getFunctionSpace());
1536 ev.typeMatchRight(*this);
1537 m_data->trace(ev.m_data.get(), axis_offset);
1538 return ev;
1539 }
1540 else {
1541 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1542 }
1543 }
1544
1545 Data
1546 Data::transpose(int axis_offset) const
1547 {
1548 DataTypes::ShapeType s=getDataPointShape();
1549 DataTypes::ShapeType ev_shape;
1550 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1551 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
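// For example, a data point shape (2,3,4) with axis_offset=1 becomes (3,4,2):
// the first axis_offset axes are rotated to the end of the shape.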
1552 int rank=getDataPointRank();
1553 if (axis_offset<0 || axis_offset>rank) {
1554 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1555 }
1556 for (int i=0; i<rank; i++) {
1557 int index = (axis_offset+i)%rank;
1558 ev_shape.push_back(s[index]); // Append to new shape
1559 }
1560 Data ev(0.,ev_shape,getFunctionSpace());
1561 ev.typeMatchRight(*this);
1562 m_data->transpose(ev.m_data.get(), axis_offset);
1563 return ev;
1564 }
1565
1566 Data
1567 Data::eigenvalues() const
1568 {
1569 // check input
1570 DataTypes::ShapeType s=getDataPointShape();
1571 if (getDataPointRank()!=2)
1572 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1573 if(s[0] != s[1])
1574 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1575 // create return
1576 DataTypes::ShapeType ev_shape(1,s[0]);
1577 Data ev(0.,ev_shape,getFunctionSpace());
1578 ev.typeMatchRight(*this);
1579 m_data->eigenvalues(ev.m_data.get());
1580 return ev;
1581 }
1582
1583 const boost::python::tuple
1584 Data::eigenvalues_and_eigenvectors(const double tol) const
1585 {
1586 DataTypes::ShapeType s=getDataPointShape();
1587 if (getDataPointRank()!=2)
1588 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1589 if(s[0] != s[1])
1590 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1591 // create return
1592 DataTypes::ShapeType ev_shape(1,s[0]);
1593 Data ev(0.,ev_shape,getFunctionSpace());
1594 ev.typeMatchRight(*this);
1595 DataTypes::ShapeType V_shape(2,s[0]);
1596 Data V(0.,V_shape,getFunctionSpace());
1597 V.typeMatchRight(*this);
1598 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1599 return make_tuple(boost::python::object(ev),boost::python::object(V));
1600 }
1601
1602 const boost::python::tuple
1603 Data::minGlobalDataPoint() const
1604 {
1605 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() as boost::make_tuple causes an
1606 // abort (for unknown reasons) if there are OpenMP directives together with it in the
1607 // surrounding function.
1608
1609 int DataPointNo;
1610 int ProcNo;
1611 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1612 return make_tuple(ProcNo,DataPointNo);
1613 }
1614
1615 void
1616 Data::calc_minGlobalDataPoint(int& ProcNo,
1617 int& DataPointNo) const
1618 {
1619 int i,j;
1620 int lowi=0,lowj=0;
1621 double min=numeric_limits<double>::max();
1622
1623 Data temp=minval();
1624
1625 int numSamples=temp.getNumSamples();
1626 int numDPPSample=temp.getNumDataPointsPerSample();
1627
1628 double next,local_min;
1629 int local_lowi,local_lowj;
1630
1631 #pragma omp parallel private(next,local_min,local_lowi,local_lowj)
1632 {
1633 local_min=min;
1634 #pragma omp for private(i,j) schedule(static)
1635 for (i=0; i<numSamples; i++) {
1636 for (j=0; j<numDPPSample; j++) {
1637 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1638 if (next<local_min) {
1639 local_min=next;
1640 local_lowi=i;
1641 local_lowj=j;
1642 }
1643 }
1644 }
1645 #pragma omp critical
1646 if (local_min<min) {
1647 min=local_min;
1648 lowi=local_lowi;
1649 lowj=local_lowj;
1650 }
1651 }
1652
1653 #ifdef PASO_MPI
1654 // determine the processor on which the minimum occurs
1655 next = temp.getDataPoint(lowi,lowj);
1656 int lowProc = 0;
1657 double *globalMins = new double[get_MPISize()+1];
1658 int error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1659
1660 if( get_MPIRank()==0 ){
1661 next = globalMins[lowProc];
1662 for( i=1; i<get_MPISize(); i++ )
1663 if( next>globalMins[i] ){
1664 lowProc = i;
1665 next = globalMins[i];
1666 }
1667 }
1668 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );
1669
1670 delete [] globalMins;
1671 ProcNo = lowProc;
1672 #else
1673 ProcNo = 0;
1674 #endif
1675 DataPointNo = lowj + lowi * numDPPSample;
1676 }
1677
1678 void
1679 Data::saveDX(std::string fileName) const
1680 {
1681 if (isEmpty())
1682 {
1683 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1684 }
1685 boost::python::dict args;
1686 args["data"]=boost::python::object(this);
1687 getDomain()->saveDX(fileName,args);
1688 return;
1689 }
1690
1691 void
1692 Data::saveVTK(std::string fileName) const
1693 {
1694 if (isEmpty())
1695 {
1696 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1697 }
1698 boost::python::dict args;
1699 args["data"]=boost::python::object(this);
1700 getDomain()->saveVTK(fileName,args);
1701 return;
1702 }
1703
1704 Data&
1705 Data::operator+=(const Data& right)
1706 {
1707 if (isProtected()) {
1708 throw DataException("Error - attempt to update protected Data object.");
1709 }
1710 binaryOp(right,plus<double>());
1711 return (*this);
1712 }
1713
1714 Data&
1715 Data::operator+=(const boost::python::object& right)
1716 {
1717 Data tmp(right,getFunctionSpace(),false);
1718 binaryOp(tmp,plus<double>());
1719 return (*this);
1720 }
1721 Data&
1722 Data::operator=(const Data& other)
1723 {
1724 copy(other);
1725 return (*this);
1726 }
1727
1728 Data&
1729 Data::operator-=(const Data& right)
1730 {
1731 if (isProtected()) {
1732 throw DataException("Error - attempt to update protected Data object.");
1733 }
1734 binaryOp(right,minus<double>());
1735 return (*this);
1736 }
1737
1738 Data&
1739 Data::operator-=(const boost::python::object& right)
1740 {
1741 Data tmp(right,getFunctionSpace(),false);
1742 binaryOp(tmp,minus<double>());
1743 return (*this);
1744 }
1745
1746 Data&
1747 Data::operator*=(const Data& right)
1748 {
1749 if (isProtected()) {
1750 throw DataException("Error - attempt to update protected Data object.");
1751 }
1752 binaryOp(right,multiplies<double>());
1753 return (*this);
1754 }
1755
1756 Data&
1757 Data::operator*=(const boost::python::object& right)
1758 {
1759 Data tmp(right,getFunctionSpace(),false);
1760 binaryOp(tmp,multiplies<double>());
1761 return (*this);
1762 }
1763
1764 Data&
1765 Data::operator/=(const Data& right)
1766 {
1767 if (isProtected()) {
1768 throw DataException("Error - attempt to update protected Data object.");
1769 }
1770 binaryOp(right,divides<double>());
1771 return (*this);
1772 }
1773
1774 Data&
1775 Data::operator/=(const boost::python::object& right)
1776 {
1777 Data tmp(right,getFunctionSpace(),false);
1778 binaryOp(tmp,divides<double>());
1779 return (*this);
1780 }
1781
1782 Data
1783 Data::rpowO(const boost::python::object& left) const
1784 {
1785 Data left_d(left,*this);
1786 return left_d.powD(*this);
1787 }
1788
1789 Data
1790 Data::powO(const boost::python::object& right) const
1791 {
1792 Data tmp(right,getFunctionSpace(),false);
1793 return powD(tmp);
1794 }
1795
1796 Data
1797 Data::powD(const Data& right) const
1798 {
1799 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
1800 }
1801
1802 //
1803 // NOTE: It is essential to specify the namespace this operator belongs to
1804 Data
1805 escript::operator+(const Data& left, const Data& right)
1806 {
1807 if (left.isLazy() || right.isLazy())
1808 {
1809 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),ADD);
1810 return Data(c);
1811 }
1812 return C_TensorBinaryOperation(left, right, plus<double>());
1813 }
1814
1815 //
1816 // NOTE: It is essential to specify the namespace this operator belongs to
1817 Data
1818 escript::operator-(const Data& left, const Data& right)
1819 {
1820 if (left.isLazy() || right.isLazy())
1821 {
1822 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),SUB);
1823 return Data(c);
1824 }
1825 return C_TensorBinaryOperation(left, right, minus<double>());
1826 }
1827
1828 //
1829 // NOTE: It is essential to specify the namespace this operator belongs to
1830 Data
1831 escript::operator*(const Data& left, const Data& right)
1832 {
1833 if (left.isLazy() || right.isLazy())
1834 {
1835 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),MUL);
1836 return Data(c);
1837 }
1838 return C_TensorBinaryOperation(left, right, multiplies<double>());
1839 }
1840
1841 //
1842 // NOTE: It is essential to specify the namespace this operator belongs to
1843 Data
1844 escript::operator/(const Data& left, const Data& right)
1845 {
1846 if (left.isLazy() || right.isLazy())
1847 {
1848 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),DIV);
1849 return Data(c);
1850 }
1851 return C_TensorBinaryOperation(left, right, divides<double>());
1852 }
1853
1854 //
1855 // NOTE: It is essential to specify the namespace this operator belongs to
1856 Data
1857 escript::operator+(const Data& left, const boost::python::object& right)
1858 {
1859 return left+Data(right,left.getFunctionSpace(),false);
1860 }
1861
1862 //
1863 // NOTE: It is essential to specify the namespace this operator belongs to
1864 Data
1865 escript::operator-(const Data& left, const boost::python::object& right)
1866 {
1867 return left-Data(right,left.getFunctionSpace(),false);
1868 }
1869
1870 //
1871 // NOTE: It is essential to specify the namespace this operator belongs to
1872 Data
1873 escript::operator*(const Data& left, const boost::python::object& right)
1874 {
1875 return left*Data(right,left.getFunctionSpace(),false);
1876 }
1877
1878 //
1879 // NOTE: It is essential to specify the namespace this operator belongs to
1880 Data
1881 escript::operator/(const Data& left, const boost::python::object& right)
1882 {
1883 return left/Data(right,left.getFunctionSpace(),false);
1884 }
1885
1886 //
1887 // NOTE: It is essential to specify the namespace this operator belongs to
1888 Data
1889 escript::operator+(const boost::python::object& left, const Data& right)
1890 {
1891 return Data(left,right.getFunctionSpace(),false)+right;
1892 }
1893
1894 //
1895 // NOTE: It is essential to specify the namespace this operator belongs to
1896 Data
1897 escript::operator-(const boost::python::object& left, const Data& right)
1898 {
1899 return Data(left,right.getFunctionSpace(),false)-right;
1900 }
1901
1902 //
1903 // NOTE: It is essential to specify the namespace this operator belongs to
1904 Data
1905 escript::operator*(const boost::python::object& left, const Data& right)
1906 {
1907 return Data(left,right.getFunctionSpace(),false)*right;
1908 }
1909
1910 //
1911 // NOTE: It is essential to specify the namespace this operator belongs to
1912 Data
1913 escript::operator/(const boost::python::object& left, const Data& right)
1914 {
1915 return Data(left,right.getFunctionSpace(),false)/right;
1916 }
1917
1918
1919 /* TODO */
1920 /* global reduction */
1921 Data
1922 Data::getItem(const boost::python::object& key) const
1923 {
1924 // const DataArrayView& view=getPointDataView();
1925
1926 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
1927
1928 if (slice_region.size()!=getDataPointRank()) {
1929 throw DataException("Error - slice size does not match Data rank.");
1930 }
1931
1932 return getSlice(slice_region);
1933 }
1934
1935 /* TODO */
1936 /* global reduction */
1937 Data
1938 Data::getSlice(const DataTypes::RegionType& region) const
1939 {
1940 return Data(*this,region);
1941 }
1942
1943 /* TODO */
1944 /* global reduction */
1945 void
1946 Data::setItemO(const boost::python::object& key,
1947 const boost::python::object& value)
1948 {
1949 Data tempData(value,getFunctionSpace());
1950 setItemD(key,tempData);
1951 }
1952
1953 void
1954 Data::setItemD(const boost::python::object& key,
1955 const Data& value)
1956 {
1957 // const DataArrayView& view=getPointDataView();
1958
1959 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
1960 if (slice_region.size()!=getDataPointRank()) {
1961 throw DataException("Error - slice size does not match Data rank.");
1962 }
1963 if (getFunctionSpace()!=value.getFunctionSpace()) {
1964 setSlice(Data(value,getFunctionSpace()),slice_region);
1965 } else {
1966 setSlice(value,slice_region);
1967 }
1968 }
1969
1970 void
1971 Data::setSlice(const Data& value,
1972 const DataTypes::RegionType& region)
1973 {
1974 if (isProtected()) {
1975 throw DataException("Error - attempt to update protected Data object.");
1976 }
1977 if (isLazy())
1978 {
1979 throw DataException("Error - setSlice not permitted on lazy data.");
1980 }
1981 Data tempValue(value);
1982 typeMatchLeft(tempValue);
1983 typeMatchRight(tempValue);
1984 getReady()->setSlice(tempValue.m_data.get(),region);
1985 }
1986
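// typeMatchLeft/typeMatchRight promote the underlying representation so both
// operands of a binary operation or slice assignment are compatible, following
// the ordering DataConstant < DataTagged < DataExpanded (a constant object can
// be tagged or expanded, a tagged object can only be expanded).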
1987 void
1988 Data::typeMatchLeft(Data& right) const
1989 {
1990 if (isExpanded()){
1991 right.expand();
1992 } else if (isTagged()) {
1993 if (right.isConstant()) {
1994 right.tag();
1995 }
1996 }
1997 }
1998
1999 void
2000 Data::typeMatchRight(const Data& right)
2001 {
2002 if (isTagged()) {
2003 if (right.isExpanded()) {
2004 expand();
2005 }
2006 } else if (isConstant()) {
2007 if (right.isExpanded()) {
2008 expand();
2009 } else if (right.isTagged()) {
2010 tag();
2011 }
2012 }
2013 }
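// Note (illustrative, not part of the original file): the two helpers above implement the
// usual promotion order constant -> tagged -> expanded.  typeMatchLeft() promotes the
// argument up to this object's representation; typeMatchRight() promotes this object up to
// the argument's.  setSlice() calls both, so afterwards the two objects share the richer of
// the two representations, e.g.
//   this tagged,   value constant  =>  value.tag()      (typeMatchLeft)
//   this constant, value expanded  =>  this->expand()   (typeMatchRight)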
2014
2015 void
2016 Data::setTaggedValueByName(std::string name,
2017 const boost::python::object& value)
2018 {
2019 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2020 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2021 setTaggedValue(tagKey,value);
2022 }
2023 }
2024 void
2025 Data::setTaggedValue(int tagKey,
2026 const boost::python::object& value)
2027 {
2028 if (isProtected()) {
2029 throw DataException("Error - attempt to update protected Data object.");
2030 }
2031 //
2032 // Ensure underlying data object is of type DataTagged
2033 if (isConstant()) tag();
2034
2035 numeric::array asNumArray(value);
2036
2037
2038 // extract the shape of the numarray
2039 DataTypes::ShapeType tempShape;
2040 for (int i=0; i < asNumArray.getrank(); i++) {
2041 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2042 }
2043
2044 // get the space for the data vector
2045 // int len = DataTypes::noValues(tempShape);
2046 // DataVector temp_data(len, 0.0, len);
2047 // DataArrayView temp_dataView(temp_data, tempShape);
2048 // temp_dataView.copy(asNumArray);
2049
2050 DataVector temp_data2;
2051 temp_data2.copyFromNumArray(asNumArray);
2052
2053 //
2054 // Call DataAbstract::setTaggedValue
2055 //m_data->setTaggedValue(tagKey,temp_dataView);
2056
2057 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2058 }
2059
2060
2061 void
2062 Data::setTaggedValueFromCPP(int tagKey,
2063 const DataTypes::ShapeType& pointshape,
2064 const DataTypes::ValueType& value,
2065 int dataOffset)
2066 {
2067 if (isProtected()) {
2068 throw DataException("Error - attempt to update protected Data object.");
2069 }
2070 //
2071 // Ensure underlying data object is of type DataTagged
2072 if (isConstant()) tag();
2073
2074 //
2075 // Call DataAbstract::setTaggedValue
2076 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2077 }
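// Illustrative sketch (not part of the original file; the tag name "rock" and the
// (size, value, blockSize) DataVector constructor are assumptions): setting a per-tag value
// from C++ without going through a numarray.
//
//   DataTypes::ShapeType shp(1, 2);            // point shape (2,)
//   DataTypes::ValueType vals(2, 0.0, 2);      // storage for one (2,) value
//   vals[0] = 1.0;  vals[1] = 0.5;
//   Data d(0.0, shp, fs, false);               // fs: a FunctionSpace whose domain knows "rock"
//   d.tag();
//   int key = fs.getDomain()->getTag("rock");  // name -> tag key, as in setTaggedValueByName
//   d.setTaggedValueFromCPP(key, shp, vals, 0);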
2078
2079 int
2080 Data::getTagNumber(int dpno)
2081 {
2082 if (isEmpty())
2083 {
2084 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2085 }
2086 return getFunctionSpace().getTagFromDataPointNo(dpno);
2087 }
2088
2089
2090 ostream& escript::operator<<(ostream& o, const Data& data)
2091 {
2092 o << data.toString();
2093 return o;
2094 }
2095
2096 Data
2097 escript::C_GeneralTensorProduct(Data& arg_0,
2098 Data& arg_1,
2099 int axis_offset,
2100 int transpose)
2101 {
2102 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2103 // SM is the product of the last axis_offset entries in arg_0.getShape().
2104
2105 // Interpolate if necessary and find an appropriate function space
2106 Data arg_0_Z, arg_1_Z;
2107 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2108 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2109 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2110 arg_1_Z = Data(arg_1);
2111 }
2112 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2113 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2114 arg_0_Z =Data(arg_0);
2115 }
2116 else {
2117 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2118 }
2119 } else {
2120 arg_0_Z = Data(arg_0);
2121 arg_1_Z = Data(arg_1);
2122 }
2123 // Get rank and shape of inputs
2124 int rank0 = arg_0_Z.getDataPointRank();
2125 int rank1 = arg_1_Z.getDataPointRank();
2126 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2127 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2128
2129 // Prepare for the loops of the product and verify compatibility of shapes
2130 int start0=0, start1=0;
2131 if (transpose == 0) {}
2132 else if (transpose == 1) { start0 = axis_offset; }
2133 else if (transpose == 2) { start1 = rank1-axis_offset; }
2134 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2135
2136
2137 // Adjust the shapes for transpose
2138 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2139 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2140 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2141 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
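// Worked example (illustrative): the modular copies above are cyclic rotations of the
// stored shapes.  With shape0 = (2,4,3), axis_offset = 2 and transpose = 1 (so start0 = 2),
// tmpShape0 becomes (3,2,4): the leading axis_offset axes of the stored shape are rotated
// to the back, so the axes to be contracted always occupy the last axis_offset slots of
// tmpShape0; with transpose = 2 the analogous rotation of shape1 puts its contracted axes
// into the first axis_offset slots of tmpShape1.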
2142
2143 #if 0
2144 // For debugging: show shape after transpose
2145 char tmp[100];
2146 std::string shapeStr;
2147 shapeStr = "(";
2148 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2149 shapeStr += ")";
2150 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2151 shapeStr = "(";
2152 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2153 shapeStr += ")";
2154 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2155 #endif
2156
2157 // Prepare for the loops of the product
2158 int SL=1, SM=1, SR=1;
2159 for (int i=0; i<rank0-axis_offset; i++) {
2160 SL *= tmpShape0[i];
2161 }
2162 for (int i=rank0-axis_offset; i<rank0; i++) {
2163 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2164 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2165 }
2166 SM *= tmpShape0[i];
2167 }
2168 for (int i=axis_offset; i<rank1; i++) {
2169 SR *= tmpShape1[i];
2170 }
2171
2172 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2173 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2174 { // block to limit the scope of out_index
2175 int out_index=0;
2176 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2177 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2178 }
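// Worked example (illustrative): for arg_0 with point shape (3,2,4), arg_1 with point
// shape (2,4,5), axis_offset = 2 and transpose = 0:
//   SL = 3, SM = 2*4 = 8, SR = 5, and shape2 = (3,5),
// so below each data point reduces to a 3x8 times 8x5 matrix-matrix product.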
2179
2180 // Declare output Data object
2181 Data res;
2182
2183 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2184 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2185 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2186 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2187 double *ptr_2 = &(res.getDataAtOffset(0));
2188 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2189 }
2190 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2191
2192 // Prepare the DataConstant input
2193 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2194 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2195
2196 // Borrow DataTagged input from Data object
2197 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2198 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2199
2200 // Prepare a DataTagged output 2
2201 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2202 res.tag();
2203 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2204 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2205
2206 // Prepare offset into DataConstant
2207 int offset_0 = tmp_0->getPointOffset(0,0);
2208 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2209 // Get the views
2210 // DataArrayView view_1 = tmp_1->getDefaultValue();
2211 // DataArrayView view_2 = tmp_2->getDefaultValue();
2212 // // Get the pointers to the actual data
2213 // double *ptr_1 = &((view_1.getData())[0]);
2214 // double *ptr_2 = &((view_2.getData())[0]);
2215
2216 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2217 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2218
2219
2220 // Compute an MVP for the default
2221 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2222 // Compute an MVP for each tag
2223 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2224 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2225 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2226 tmp_2->addTag(i->first);
2227 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2228 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2229 // double *ptr_1 = &view_1.getData(0);
2230 // double *ptr_2 = &view_2.getData(0);
2231
2232 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2233 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2234
2235 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2236 }
2237
2238 }
2239 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2240
2241 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2242 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2243 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2244 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2245 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2246 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2247 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2248 int sampleNo_1,dataPointNo_1;
2249 int numSamples_1 = arg_1_Z.getNumSamples();
2250 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2251 int offset_0 = tmp_0->getPointOffset(0,0);
2252 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2253 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2254 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2255 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2256 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2257 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2258 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2259 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2260 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2261 }
2262 }
2263
2264 }
2265 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2266
2267 // Borrow DataTagged input from Data object
2268 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2269 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2270
2271 // Prepare the DataConstant input
2272 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2273 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2274
2275 // Prepare a DataTagged output 2
2276 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2277 res.tag();
2278 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2279 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2280
2281 // Prepare offset into DataConstant
2282 int offset_1 = tmp_1->getPointOffset(0,0);
2283 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2284 // Get the views
2285 // DataArrayView view_0 = tmp_0->getDefaultValue();
2286 // DataArrayView view_2 = tmp_2->getDefaultValue();
2287 // // Get the pointers to the actual data
2288 // double *ptr_0 = &((view_0.getData())[0]);
2289 // double *ptr_2 = &((view_2.getData())[0]);
2290
2291 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2292 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2293
2294 // Compute an MVP for the default
2295 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2296 // Compute an MVP for each tag
2297 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2298 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2299 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2300 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2301 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2302 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2303 // double *ptr_0 = &view_0.getData(0);
2304 // double *ptr_2 = &view_2.getData(0);
2305
2306 tmp_2->addTag(i->first);
2307 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2308 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2309 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2310 }
2311
2312 }
2313 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2314
2315 // Borrow DataTagged input from Data object
2316 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2317 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2318
2319 // Borrow DataTagged input from Data object
2320 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2321 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2322
2323 // Prepare a DataTagged output 2
2324 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2325 res.tag(); // DataTagged output
2326 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2327 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2328
2329 // // Get the views
2330 // DataArrayView view_0 = tmp_0->getDefaultValue();
2331 // DataArrayView view_1 = tmp_1->getDefaultValue();
2332 // DataArrayView view_2 = tmp_2->getDefaultValue();
2333 // // Get the pointers to the actual data
2334 // double *ptr_0 = &((view_0.getData())[0]);
2335 // double *ptr_1 = &((view_1.getData())[0]);
2336 // double *ptr_2 = &((view_2.getData())[0]);
2337
2338 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2339 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2340 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2341
2342
2343 // Compute an MVP for the default
2344 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2345 // Merge the tags
2346 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2347 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2348 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2349 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2350 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2351 }
2352 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2353 tmp_2->addTag(i->first);
2354 }
2355 // Compute an MVP for each tag
2356 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2357 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2358 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2359 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2360 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2361 // double *ptr_0 = &view_0.getData(0);
2362 // double *ptr_1 = &view_1.getData(0);
2363 // double *ptr_2 = &view_2.getData(0);
2364
2365 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2366 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2367 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2368
2369 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2370 }
2371
2372 }
2373 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2374
2375 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2376 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2377 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2378 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2379 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2380 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2381 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2382 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2383 int sampleNo_0,dataPointNo_0;
2384 int numSamples_0 = arg_0_Z.getNumSamples();
2385 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2386 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2387 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2388 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2389 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2390 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2391 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2392 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2393 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2394 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2395 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2396 }
2397 }
2398
2399 }
2400 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2401
2402 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2403 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2404 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2405 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2406 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2407 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2408 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2409 int sampleNo_0,dataPointNo_0;
2410 int numSamples_0 = arg_0_Z.getNumSamples();
2411 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2412 int offset_1 = tmp_1->getPointOffset(0,0);
2413 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2414 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2415 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2416 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2417 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2418 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2419 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2420 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2421 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2422 }
2423 }
2424
2425
2426 }
2427 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2428
2429 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2430 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2431 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2432 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2433 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2434 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2435 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2436 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2437 int sampleNo_0,dataPointNo_0;
2438 int numSamples_0 = arg_0_Z.getNumSamples();
2439 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2440 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2441 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2442 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2443 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2444 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2445 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2446 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2447 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2448 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2449 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2450 }
2451 }
2452
2453 }
2454 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2455
2456 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2457 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2458 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2459 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2460 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2461 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2462 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2463 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2464 int sampleNo_0,dataPointNo_0;
2465 int numSamples_0 = arg_0_Z.getNumSamples();
2466 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2467 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2468 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2469 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2470 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2471 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2472 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2473 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2474 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2475 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2476 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2477 }
2478 }
2479
2480 }
2481 else {
2482 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2483 }
2484
2485 return res;
2486 }
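// Illustrative usage sketch (not part of the original file): contracting two rank-2 Data
// objects over one shared axis, i.e. an ordinary matrix product per data point.  Assuming
// A has point shape (3,4) and B has point shape (4,2) on compatible function spaces:
//
//   Data C = escript::C_GeneralTensorProduct(A, B, 1, 0);  // axis_offset=1, transpose=0 -> point shape (3,2)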
2487
2488 DataAbstract*
2489 Data::borrowData() const
2490 {
2491 return m_data.get();
2492 }
2493
2494 // Note: this returns a non-const handle from a const member function.
2495 DataAbstract_ptr
2496 Data::borrowDataPtr() const
2497 {
2498 return m_data;
2499 }
2500
2501 // Note: this returns a non-const handle from a const member function.
2502 DataReady_ptr
2503 Data::borrowReadyPtr() const
2504 {
2505 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2506 EsysAssert((dr!=0), "Error - casting to DataReady.");
2507 return dr;
2508 }
2509
2510 std::string
2511 Data::toString() const
2512 {
2513 static const DataTypes::ValueType::size_type TOO_MANY_POINTS=80;
2514 if (isLazy())
2515 { // This needs to change back to printing out something useful once the summary ops are defined
2516 return m_data->toString();
2517 }
2518 if (getNumDataPoints()*getDataPointSize()>TOO_MANY_POINTS)
2519 {
2520 stringstream temp;
2521 temp << "Summary: inf="<< inf() << " sup=" << sup() << " data points=" << getNumDataPoints();
2522 return temp.str();
2523 }
2524 return m_data->toString();
2525 }
2526
2527
2528
2529 DataTypes::ValueType::const_reference
2530 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2531 {
2532 if (isLazy())
2533 {
2534 throw DataException("getDataAtOffset not permitted on lazy data.");
2535 }
2536 return getReady()->getDataAtOffset(i);
2537 }
2538
2539
2540 DataTypes::ValueType::reference
2541 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2542 {
2543 if (isLazy())
2544 {
2545 throw DataException("getDataAtOffset not permitted on lazy data.");
2546 }
2547 return getReady()->getDataAtOffset(i);
2548 }
2549
2550 DataTypes::ValueType::const_reference
2551 Data::getDataPoint(int sampleNo, int dataPointNo) const
2552 {
2553 if (!isReady())
2554 {
2555 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2556 }
2557 else
2558 {
2559 const DataReady* dr=getReady();
2560 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2561 }
2562 }
2563
2564
2565 DataTypes::ValueType::reference
2566 Data::getDataPoint(int sampleNo, int dataPointNo)
2567 {
2568 if (!isReady())
2569 {
2570 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2571 }
2572 else
2573 {
2574 DataReady* dr=getReady();
2575 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2576 }
2577 }
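// Illustrative sketch (not part of the original file; assumes ValueType::reference is a
// plain double& and that the components of one data point are stored contiguously from the
// returned reference, which is how the offsets are used elsewhere in this file):
//
//   double& first = d.getDataPoint(sampleNo, dataPointNo);
//   for (int k = 0; k < d.getDataPointSize(); ++k) {
//     double component = (&first)[k];    // k-th component of that data point
//   }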
2578
2579
2580 /* Member functions specific to the MPI implementation */
2581
2582 void
2583 Data::print()
2584 {
2585 int i,j;
2586
2587 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2588 for( i=0; i<getNumSamples(); i++ )
2589 {
2590 printf( "[%6d]", i );
2591 for( j=0; j<getNumDataPointsPerSample(); j++ )
2592 printf( "\t%10.7g", (getSampleData(i))[j] );
2593 printf( "\n" );
2594 }
2595 }
2596 void
2597 Data::dump(const std::string fileName) const
2598 {
2599 try
2600 {
2601 return m_data->dump(fileName);
2602 }
2603 catch (exception& e)
2604 {
2605 cout << e.what() << endl;
2606 }
2607 }
2608
2609 int
2610 Data::get_MPISize() const
2611 {
2612 int size;
2613 #ifdef PASO_MPI
2614 int error;
2615 error = MPI_Comm_size( get_MPIComm(), &size );
2616 #else
2617 size = 1;
2618 #endif
2619 return size;
2620 }
2621
2622 int
2623 Data::get_MPIRank() const
2624 {
2625 int rank;
2626 #ifdef PASO_MPI
2627 int error;
2628 error = MPI_Comm_rank( get_MPIComm(), &rank );
2629 #else
2630 rank = 0;
2631 #endif
2632 return rank;
2633 }
2634
2635 MPI_Comm
2636 Data::get_MPIComm() const
2637 {
2638 #ifdef PASO_MPI
2639 return MPI_COMM_WORLD;
2640 #else
2641 return -1;
2642 #endif
2643 }
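// Illustrative sketch (not part of the original file): the three helpers above return
// sensible values in a serial build as well, so calling code does not need its own
// #ifdef PASO_MPI, e.g.
//
//   if (d.get_MPIRank() == 0) {   // always true without PASO_MPI
//     d.print();                  // only one copy of the output
//   }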
2644
2645
