/[escript]/trunk/escript/src/Data.cpp

Contents of /trunk/escript/src/Data.cpp



Revision 2048
Mon Nov 17 08:46:00 2008 UTC by phornby
File size: 86143 byte(s)
Experimental commit to move the code to a Windows box
on the other side of a firewall. Purpose: add support for the
improved intelc math library on Windows.


1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
25 #include "UnaryFuncs.h"
26 #endif
27 #include "FunctionSpaceException.h"
28 #include "EscriptParams.h"
29
30 extern "C" {
31 #include "escript/blocktimer.h"
32 }
33
34 #include <fstream>
35 #include <algorithm>
36 #include <vector>
37 #include <functional>
38
39 #include <boost/python/dict.hpp>
40 #include <boost/python/extract.hpp>
41 #include <boost/python/long.hpp>
42
43 using namespace std;
44 using namespace boost::python;
45 using namespace boost;
46 using namespace escript;
47
48 // ensure the current object is not a DataLazy
49 // The idea was that we could add an optional warning whenever a resolve is forced
50 #define FORCERESOLVE if (isLazy()) {resolve();}
51
52 Data::Data()
53 {
54 //
55 // Default data is type DataEmpty
56 DataAbstract* temp=new DataEmpty();
57 m_data=temp->getPtr();
58 m_protected=false;
59 }
60
61 Data::Data(double value,
62 const tuple& shape,
63 const FunctionSpace& what,
64 bool expanded)
65 {
66 DataTypes::ShapeType dataPointShape;
67 for (int i = 0; i < shape.attr("__len__")(); ++i) {
68 dataPointShape.push_back(extract<const int>(shape[i]));
69 }
70
71 int len = DataTypes::noValues(dataPointShape);
72 DataVector temp_data(len,value,len);
73 initialise(temp_data, dataPointShape, what, expanded);
74 m_protected=false;
75 }
76
77 Data::Data(double value,
78 const DataTypes::ShapeType& dataPointShape,
79 const FunctionSpace& what,
80 bool expanded)
81 {
82 int len = DataTypes::noValues(dataPointShape);
83
84 DataVector temp_data(len,value,len);
85 // DataArrayView temp_dataView(temp_data, dataPointShape);
86
87 // initialise(temp_dataView, what, expanded);
88 initialise(temp_data, dataPointShape, what, expanded);
89
90 m_protected=false;
91 }
92
93 Data::Data(const Data& inData)
94 {
95 m_data=inData.m_data;
96 m_protected=inData.isProtected();
97 }
98
99
100 Data::Data(const Data& inData,
101 const DataTypes::RegionType& region)
102 {
103 DataAbstract_ptr dat=inData.m_data;
104 if (inData.isLazy())
105 {
106 dat=inData.m_data->resolve();
107 }
108 else
109 {
110 dat=inData.m_data;
111 }
112 //
113 // Create Data which is a slice of another Data
114 DataAbstract* tmp = dat->getSlice(region);
115 m_data=DataAbstract_ptr(tmp);
116 m_protected=false;
117 }
118
119 Data::Data(const Data& inData,
120 const FunctionSpace& functionspace)
121 {
122 if (inData.isEmpty())
123 {
124 throw DataException("Error - will not interpolate for instances of DataEmpty.");
125 }
126 if (inData.getFunctionSpace()==functionspace) {
127 m_data=inData.m_data;
128 }
129 else
130 {
131
132 if (inData.isConstant()) { // for a constant function, we just need to use the new function space
133 if (!inData.probeInterpolation(functionspace))
134 { // Even though this is constant, we still need to check whether interpolation is allowed
135 throw FunctionSpaceException("Call to probeInterpolation returned false for DataConstant.");
136 }
137 // if the data is not lazy, this will just be a cast to DataReady
138 DataReady_ptr dr=inData.m_data->resolve();
139 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVector());
140 m_data=DataAbstract_ptr(dc);
141 } else {
142 Data tmp(0,inData.getDataPointShape(),functionspace,true);
143 // Note: Must use a reference or pointer to a derived object
144 // in order to get polymorphic behaviour. Shouldn't really
145 // be able to create an instance of AbstractDomain but that was done
146 // as a boost:python work around which may no longer be required.
147 /*const AbstractDomain& inDataDomain=inData.getDomain();*/
148 const_Domain_ptr inDataDomain=inData.getDomain();
149 if (inDataDomain==functionspace.getDomain()) {
150 inDataDomain->interpolateOnDomain(tmp,inData);
151 } else {
152 inDataDomain->interpolateACross(tmp,inData);
153 }
154 m_data=tmp.m_data;
155 }
156 }
157 m_protected=false;
158 }
159
160 Data::Data(DataAbstract* underlyingdata)
161 {
162 // m_data=shared_ptr<DataAbstract>(underlyingdata);
163 m_data=underlyingdata->getPtr();
164 m_protected=false;
165 }
166
167 Data::Data(DataAbstract_ptr underlyingdata)
168 {
169 m_data=underlyingdata;
170 m_protected=false;
171 }
172
173
174 Data::Data(const numeric::array& value,
175 const FunctionSpace& what,
176 bool expanded)
177 {
178 initialise(value,what,expanded);
179 m_protected=false;
180 }
181 /*
182 Data::Data(const DataArrayView& value,
183 const FunctionSpace& what,
184 bool expanded)
185 {
186 initialise(value,what,expanded);
187 m_protected=false;
188 }*/
189
190 Data::Data(const DataTypes::ValueType& value,
191 const DataTypes::ShapeType& shape,
192 const FunctionSpace& what,
193 bool expanded)
194 {
195 initialise(value,shape,what,expanded);
196 m_protected=false;
197 }
198
199
200 Data::Data(const object& value,
201 const FunctionSpace& what,
202 bool expanded)
203 {
204 numeric::array asNumArray(value);
205 initialise(asNumArray,what,expanded);
206 m_protected=false;
207 }
208
209
210 Data::Data(const object& value,
211 const Data& other)
212 {
213 numeric::array asNumArray(value);
214
215 // extract the shape of the numarray
216 DataTypes::ShapeType tempShape=DataTypes::shapeFromNumArray(asNumArray);
217 // /* for (int i=0; i < asNumArray.getrank(); i++) {
218 // tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
219 // }*/
220 // // get the space for the data vector
221 // int len = DataTypes::noValues(tempShape);
222 // DataVector temp_data(len, 0.0, len);
223 // /* DataArrayView temp_dataView(temp_data, tempShape);
224 // temp_dataView.copy(asNumArray);*/
225 // temp_data.copyFromNumArray(asNumArray);
226
227 //
228 // Create DataConstant using the given value and all other parameters
229 // copied from other. If value is a rank 0 object this Data
230 // will assume the point data shape of other.
231
232 if (DataTypes::getRank(tempShape)/*temp_dataView.getRank()*/==0) {
233
234
235 // get the space for the data vector
236 int len1 = DataTypes::noValues(tempShape);
237 DataVector temp_data(len1, 0.0, len1);
238 temp_data.copyFromNumArray(asNumArray);
239
240 int len = DataTypes::noValues(other.getDataPointShape());
241
242 DataVector temp2_data(len, temp_data[0]/*temp_dataView()*/, len);
243 //DataArrayView temp2_dataView(temp2_data, other.getPointDataView().getShape());
244 // initialise(temp2_dataView, other.getFunctionSpace(), false);
245
246 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
247 // boost::shared_ptr<DataAbstract> sp(t);
248 // m_data=sp;
249 m_data=DataAbstract_ptr(t);
250
251 } else {
252 //
253 // Create a DataConstant with the same sample shape as other
254 // initialise(temp_dataView, other.getFunctionSpace(), false);
255 DataConstant* t=new DataConstant(asNumArray,other.getFunctionSpace());
256 // boost::shared_ptr<DataAbstract> sp(t);
257 // m_data=sp;
258 m_data=DataAbstract_ptr(t);
259 }
260 m_protected=false;
261 }
262
263 Data::~Data()
264 {
265
266 }
267
268
269
270 void
271 Data::initialise(const boost::python::numeric::array& value,
272 const FunctionSpace& what,
273 bool expanded)
274 {
275 //
276 // Construct a Data object of the appropriate type.
277 // Construct the object first as there seems to be a bug which causes
278 // undefined behaviour if an exception is thrown during construction
279 // within the shared_ptr constructor.
280 if (expanded) {
281 DataAbstract* temp=new DataExpanded(value, what);
282 // boost::shared_ptr<DataAbstract> temp_data(temp);
283 // m_data=temp_data;
284 m_data=temp->getPtr();
285 } else {
286 DataAbstract* temp=new DataConstant(value, what);
287 // boost::shared_ptr<DataAbstract> temp_data(temp);
288 // m_data=temp_data;
289 m_data=temp->getPtr();
290 }
291 }
292
293
294 void
295 Data::initialise(const DataTypes::ValueType& value,
296 const DataTypes::ShapeType& shape,
297 const FunctionSpace& what,
298 bool expanded)
299 {
300 //
301 // Construct a Data object of the appropriate type.
302 // Construct the object first as there seems to be a bug which causes
303 // undefined behaviour if an exception is thrown during construction
304 // within the shared_ptr constructor.
305 if (expanded) {
306 DataAbstract* temp=new DataExpanded(what, shape, value);
307 // boost::shared_ptr<DataAbstract> temp_data(temp);
308 // m_data=temp_data;
309 m_data=temp->getPtr();
310 } else {
311 DataAbstract* temp=new DataConstant(what, shape, value);
312 // boost::shared_ptr<DataAbstract> temp_data(temp);
313 // m_data=temp_data;
314 m_data=temp->getPtr();
315 }
316 }
317
318
319 // void
320 // Data::CompareDebug(const Data& rd)
321 // {
322 // using namespace std;
323 // bool mismatch=false;
324 // std::cout << "Comparing left and right" << endl;
325 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
326 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
327 //
328 // if (left==0)
329 // {
330 // cout << "left arg is not a DataTagged\n";
331 // return;
332 // }
333 //
334 // if (right==0)
335 // {
336 // cout << "right arg is not a DataTagged\n";
337 // return;
338 // }
339 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
340 // cout << "Shapes ";
341 // if (left->getShape()==right->getShape())
342 // {
343 // cout << "ok\n";
344 // }
345 // else
346 // {
347 // cout << "Problem: shapes do not match\n";
348 // mismatch=true;
349 // }
350 // int lim=left->getVector().size();
351 // if (right->getVector().size()) lim=right->getVector().size();
352 // for (int i=0;i<lim;++i)
353 // {
354 // if (left->getVector()[i]!=right->getVector()[i])
355 // {
356 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
357 // mismatch=true;
358 // }
359 // }
360 //
361 // // still need to check the tag map
362 // // also need to watch what is happening to function spaces, are they copied or what?
363 //
364 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
365 // const DataTagged::DataMapType& mapright=right->getTagLookup();
366 //
367 // if (mapleft.size()!=mapright.size())
368 // {
369 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
370 // mismatch=true;
371 // cout << "Left map\n";
372 // DataTagged::DataMapType::const_iterator i,j;
373 // for (i=mapleft.begin();i!=mapleft.end();++i) {
374 // cout << "(" << i->first << "=>" << i->second << ")\n";
375 // }
376 // cout << "Right map\n";
377 // for (i=mapright.begin();i!=mapright.end();++i) {
378 // cout << "(" << i->first << "=>" << i->second << ")\n";
379 // }
380 // cout << "End map\n";
381 //
382 // }
383 //
384 // DataTagged::DataMapType::const_iterator i,j;
385 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
386 // if ((i->first!=j->first) || (i->second!=j->second))
387 // {
388 // cout << "(" << i->first << "=>" << i->second << ")";
389 // cout << ":(" << j->first << "=>" << j->second << ") ";
390 // mismatch=true;
391 // }
392 // }
393 // if (mismatch)
394 // {
395 // cout << "#Mismatch\n";
396 // }
397 // }
398
399 escriptDataC
400 Data::getDataC()
401 {
402 escriptDataC temp;
403 temp.m_dataPtr=(void*)this;
404 return temp;
405 }
406
407 escriptDataC
408 Data::getDataC() const
409 {
410 escriptDataC temp;
411 temp.m_dataPtr=(void*)this;
412 return temp;
413 }
414
415 const boost::python::tuple
416 Data::getShapeTuple() const
417 {
418 const DataTypes::ShapeType& shape=getDataPointShape();
419 switch(getDataPointRank()) {
420 case 0:
421 return make_tuple();
422 case 1:
423 return make_tuple(long_(shape[0]));
424 case 2:
425 return make_tuple(long_(shape[0]),long_(shape[1]));
426 case 3:
427 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
428 case 4:
429 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
430 default:
431 throw DataException("Error - illegal Data rank.");
432 }
433 }
434
435
436 // The different name is needed because boost has trouble with overloaded functions.
437 // It can't work out what type the function is based solely on its name.
438 // There are ways to fix this involving creating function pointer variables for each form,
439 // but there doesn't seem to be a need given that the methods have the same name from the Python point of view.
440 Data*
441 Data::copySelf()
442 {
443 DataAbstract* temp=m_data->deepCopy();
444 return new Data(temp);
445 }
446
447 void
448 Data::copy(const Data& other)
449 {
450 DataAbstract* temp=other.m_data->deepCopy();
451 DataAbstract_ptr p=temp->getPtr();
452 m_data=p;
453 }
454
455
456 Data
457 Data::delay()
458 {
459 DataLazy* dl=new DataLazy(m_data);
460 return Data(dl);
461 }
462
463 void
464 Data::delaySelf()
465 {
466 if (!isLazy())
467 {
468 m_data=(new DataLazy(m_data))->getPtr();
469 }
470 }
471
472 void
473 Data::setToZero()
474 {
475 if (isEmpty())
476 {
477 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
478 }
479 m_data->setToZero();
480 }
481
482 void
483 Data::copyWithMask(const Data& other,
484 const Data& mask)
485 {
486 // 1. Interpolate if required so that all Data objects use the same FunctionSpace as this one
487 // 2. Tag or Expand so that all Data objects are of the same type
488 // 3. Iterate over the data vectors, copying values where the mask is > 0
489 if (other.isEmpty() || mask.isEmpty())
490 {
491 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
492 }
493 Data other2(other);
494 Data mask2(mask);
495 other2.resolve();
496 mask2.resolve();
497 this->resolve();
498 FunctionSpace myFS=getFunctionSpace();
499 FunctionSpace oFS=other2.getFunctionSpace();
500 FunctionSpace mFS=mask2.getFunctionSpace();
501 if (oFS!=myFS)
502 {
503 if (other2.probeInterpolation(myFS))
504 {
505 other2=other2.interpolate(myFS);
506 }
507 else
508 {
509 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
510 }
511 }
512 if (mFS!=myFS)
513 {
514 if (mask2.probeInterpolation(myFS))
515 {
516 mask2=mask2.interpolate(myFS);
517 }
518 else
519 {
520 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
521 }
522 }
523 // Ensure that all args have the same type
524 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
525 {
526 this->expand();
527 other2.expand();
528 mask2.expand();
529 }
530 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
531 {
532 this->tag();
533 other2.tag();
534 mask2.tag();
535 }
536 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
537 {
538 }
539 else
540 {
541 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
542 }
543 // Now we iterate over the elements
544 DataVector& self=getReadyPtr()->getVector();
545 const DataVector& ovec=other2.getReadyPtr()->getVector();
546 const DataVector& mvec=mask2.getReadyPtr()->getVector();
547 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
548 {
549 throw DataException("Error - size mismatch in arguments to copyWithMask.");
550 }
551 size_t num_points=self.size();
552
553 // OPENMP 3.0 allows unsigned loop vars.
554 #if defined(_OPENMP) && (_OPENMP < 200805)
555 long i;
556 #else
557 size_t i;
558 #endif
559 #pragma omp parallel for private(i) schedule(static)
560 for (i=0;i<num_points;++i)
561 {
562 if (mvec[i]>0)
563 {
564 self[i]=ovec[i];
565 }
566 }
567 }
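// A minimal usage sketch for copyWithMask (hypothetical caller code; assumes a
// FunctionSpace fs obtained from a domain elsewhere):
//   Data target(0.0, DataTypes::ShapeType(), fs, true);   // expanded scalar Data
//   Data source(1.0, DataTypes::ShapeType(), fs, true);
//   Data mask = source.wherePositive();                   // 1.0 where source > 0, else 0.0
//   target.copyWithMask(source, mask);                    // target takes source's values wherever mask > 0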
568
569
570
571 bool
572 Data::isExpanded() const
573 {
574 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
575 return (temp!=0);
576 }
577
578 bool
579 Data::isTagged() const
580 {
581 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
582 return (temp!=0);
583 }
584
585 bool
586 Data::isEmpty() const
587 {
588 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
589 return (temp!=0);
590 }
591
592 bool
593 Data::isConstant() const
594 {
595 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
596 return (temp!=0);
597 }
598
599 bool
600 Data::isLazy() const
601 {
602 return m_data->isLazy();
603 }
604
605 // at the moment this is synonymous with !isLazy() but that could change
606 bool
607 Data::isReady() const
608 {
609 return (dynamic_cast<DataReady*>(m_data.get())!=0);
610 }
611
612
613 void
614 Data::setProtection()
615 {
616 m_protected=true;
617 }
618
619 bool
620 Data::isProtected() const
621 {
622 return m_protected;
623 }
624
625
626
627 void
628 Data::expand()
629 {
630 if (isConstant()) {
631 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
632 DataAbstract* temp=new DataExpanded(*tempDataConst);
633 // shared_ptr<DataAbstract> temp_data(temp);
634 // m_data=temp_data;
635 m_data=temp->getPtr();
636 } else if (isTagged()) {
637 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
638 DataAbstract* temp=new DataExpanded(*tempDataTag);
639 // shared_ptr<DataAbstract> temp_data(temp);
640 // m_data=temp_data;
641 m_data=temp->getPtr();
642 } else if (isExpanded()) {
643 //
644 // do nothing
645 } else if (isEmpty()) {
646 throw DataException("Error - Expansion of DataEmpty not possible.");
647 } else if (isLazy()) {
648 resolve();
649 expand(); // resolve might not give us expanded data
650 } else {
651 throw DataException("Error - Expansion not implemented for this Data type.");
652 }
653 }
654
655 void
656 Data::tag()
657 {
658 if (isConstant()) {
659 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
660 DataAbstract* temp=new DataTagged(*tempDataConst);
661 // shared_ptr<DataAbstract> temp_data(temp);
662 // m_data=temp_data;
663 m_data=temp->getPtr();
664 } else if (isTagged()) {
665 // do nothing
666 } else if (isExpanded()) {
667 throw DataException("Error - Creating tag data from DataExpanded not possible.");
668 } else if (isEmpty()) {
669 throw DataException("Error - Creating tag data from DataEmpty not possible.");
670 } else if (isLazy()) {
671 DataAbstract_ptr res=m_data->resolve();
672 if (m_data->isExpanded())
673 {
674 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
675 }
676 m_data=res;
677 tag();
678 } else {
679 throw DataException("Error - Tagging not implemented for this Data type.");
680 }
681 }
682
683 void
684 Data::resolve()
685 {
686 if (isLazy())
687 {
688 m_data=m_data->resolve();
689 }
690 }
691
692
693 Data
694 Data::oneOver() const
695 {
696 if (isLazy())
697 {
698 DataLazy* c=new DataLazy(borrowDataPtr(),RECIP);
699 return Data(c);
700 }
701 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
702 }
703
704 Data
705 Data::wherePositive() const
706 {
707 if (isLazy())
708 {
709 DataLazy* c=new DataLazy(borrowDataPtr(),GZ);
710 return Data(c);
711 }
712 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
713 }
714
715 Data
716 Data::whereNegative() const
717 {
718 if (isLazy())
719 {
720 DataLazy* c=new DataLazy(borrowDataPtr(),LZ);
721 return Data(c);
722 }
723 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
724 }
725
726 Data
727 Data::whereNonNegative() const
728 {
729 if (isLazy())
730 {
731 DataLazy* c=new DataLazy(borrowDataPtr(),GEZ);
732 return Data(c);
733 }
734 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
735 }
736
737 Data
738 Data::whereNonPositive() const
739 {
740 if (isLazy())
741 {
742 DataLazy* c=new DataLazy(borrowDataPtr(),LEZ);
743 return Data(c);
744 }
745 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
746 }
747
748 Data
749 Data::whereZero(double tol) const
750 {
751 Data dataAbs=abs();
752 return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
753 }
754
755 Data
756 Data::whereNonZero(double tol) const
757 {
758 Data dataAbs=abs();
759 return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
760 }
761
762 Data
763 Data::interpolate(const FunctionSpace& functionspace) const
764 {
765 return Data(*this,functionspace);
766 }
767
768 bool
769 Data::probeInterpolation(const FunctionSpace& functionspace) const
770 {
771 return getFunctionSpace().probeInterpolation(functionspace);
772 // if (getFunctionSpace()==functionspace) {
773 // return true;
774 // } else {
775 // const_Domain_ptr domain=getDomain();
776 // if (*domain==*functionspace.getDomain()) {
777 // return domain->probeInterpolationOnDomain(getFunctionSpace().getTypeCode(),functionspace.getTypeCode());
778 // } else {
779 // return domain->probeInterpolationACross(getFunctionSpace().getTypeCode(),*(functionspace.getDomain()),functionspace.getTypeCode());
780 // }
781 // }
782 }
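// A minimal usage sketch (hypothetical caller code), assuming a target FunctionSpace fs
// on the same or a compatible domain:
//   if (d.probeInterpolation(fs)) {
//     Data d_on_fs = d.interpolate(fs);   // uses the Data(const Data&, const FunctionSpace&) constructor above
//   }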
783
784 Data
785 Data::gradOn(const FunctionSpace& functionspace) const
786 {
787 if (isEmpty())
788 {
789 throw DataException("Error - operation not permitted on instances of DataEmpty.");
790 }
791 double blocktimer_start = blocktimer_time();
792 if (functionspace.getDomain()!=getDomain())
793 throw DataException("Error - gradient cannot be calculated on different domains.");
794 DataTypes::ShapeType grad_shape=getDataPointShape();
795 grad_shape.push_back(functionspace.getDim());
796 Data out(0.0,grad_shape,functionspace,true);
797 getDomain()->setToGradient(out,*this);
798 blocktimer_increment("grad()", blocktimer_start);
799 return out;
800 }
801
802 Data
803 Data::grad() const
804 {
805 if (isEmpty())
806 {
807 throw DataException("Error - operation not permitted on instances of DataEmpty.");
808 }
809 return gradOn(escript::function(*getDomain()));
810 }
811
812 int
813 Data::getDataPointSize() const
814 {
815 return m_data->getNoValues();
816 }
817
818 DataTypes::ValueType::size_type
819 Data::getLength() const
820 {
821 return m_data->getLength();
822 }
823
824 const
825 boost::python::numeric::array
826 Data:: getValueOfDataPoint(int dataPointNo)
827 {
828 int i, j, k, l;
829
830 FORCERESOLVE;
831
832 //
833 // determine the rank and shape of each data point
834 int dataPointRank = getDataPointRank();
835 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
836
837 //
838 // create the numeric array to be returned
839 boost::python::numeric::array numArray(0.0);
840
841 //
842 // the shape of the returned numeric array will be the same
843 // as that of the data point
844 int arrayRank = dataPointRank;
845 const DataTypes::ShapeType& arrayShape = dataPointShape;
846
847 //
848 // resize the numeric array to the shape just calculated
849 if (arrayRank==0) {
850 numArray.resize(1);
851 }
852 if (arrayRank==1) {
853 numArray.resize(arrayShape[0]);
854 }
855 if (arrayRank==2) {
856 numArray.resize(arrayShape[0],arrayShape[1]);
857 }
858 if (arrayRank==3) {
859 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
860 }
861 if (arrayRank==4) {
862 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
863 }
864
865 if (getNumDataPointsPerSample()>0) {
866 int sampleNo = dataPointNo/getNumDataPointsPerSample();
867 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
868 //
869 // Check a valid sample number has been supplied
870 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
871 throw DataException("Error - Data::getValueOfDataPoint: invalid sampleNo.");
872 }
873
874 //
875 // Check a valid data point number has been supplied
876 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
877 throw DataException("Error - Data::getValueOfDataPoint: invalid dataPointNoInSample.");
878 }
879 // TODO: global error handling
880 // create a view of the data if it is stored locally
881 // DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
882 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
883
884
885 switch( dataPointRank ){
886 case 0 :
887 numArray[0] = getDataAtOffset(offset);
888 break;
889 case 1 :
890 for( i=0; i<dataPointShape[0]; i++ )
891 numArray[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
892 break;
893 case 2 :
894 for( i=0; i<dataPointShape[0]; i++ )
895 for( j=0; j<dataPointShape[1]; j++)
896 numArray[make_tuple(i,j)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
897 break;
898 case 3 :
899 for( i=0; i<dataPointShape[0]; i++ )
900 for( j=0; j<dataPointShape[1]; j++ )
901 for( k=0; k<dataPointShape[2]; k++)
902 numArray[make_tuple(i,j,k)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
903 break;
904 case 4 :
905 for( i=0; i<dataPointShape[0]; i++ )
906 for( j=0; j<dataPointShape[1]; j++ )
907 for( k=0; k<dataPointShape[2]; k++ )
908 for( l=0; l<dataPointShape[3]; l++)
909 numArray[make_tuple(i,j,k,l)]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
910 break;
911 }
912 }
913 //
914 // return the array
915 return numArray;
916
917 }
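// Worked example of the dataPointNo decomposition used above: with
// getNumDataPointsPerSample()==4, dataPointNo 10 gives sampleNo = 10/4 = 2 and
// dataPointNoInSample = 10 - 2*4 = 2, i.e. the third point of the third sample.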
918
919 void
920 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
921 {
922 // this will throw if the value cannot be represented
923 boost::python::numeric::array num_array(py_object);
924 setValueOfDataPointToArray(dataPointNo,num_array);
925 }
926
927 void
928 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::numeric::array& num_array)
929 {
930 if (isProtected()) {
931 throw DataException("Error - attempt to update protected Data object.");
932 }
933 FORCERESOLVE;
934 //
935 // check rank
936 if (static_cast<unsigned int>(num_array.getrank())<getDataPointRank())
937 throw DataException("Rank of numarray does not match Data object rank");
938
939 //
940 // check shape of num_array
941 for (unsigned int i=0; i<getDataPointRank(); i++) {
942 if (extract<int>(num_array.getshape()[i])!=getDataPointShape()[i])
943 throw DataException("Shape of numarray does not match Data object shape");
944 }
945 //
946 // make sure data is expanded:
947 //
948 if (!isExpanded()) {
949 expand();
950 }
951 if (getNumDataPointsPerSample()>0) {
952 int sampleNo = dataPointNo/getNumDataPointsPerSample();
953 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
954 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,num_array);
955 } else {
956 m_data->copyToDataPoint(-1, 0,num_array);
957 }
958 }
959
960 void
961 Data::setValueOfDataPoint(int dataPointNo, const double value)
962 {
963 if (isProtected()) {
964 throw DataException("Error - attempt to update protected Data object.");
965 }
966 //
967 // make sure data is expanded:
968 FORCERESOLVE;
969 if (!isExpanded()) {
970 expand();
971 }
972 if (getNumDataPointsPerSample()>0) {
973 int sampleNo = dataPointNo/getNumDataPointsPerSample();
974 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
975 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
976 } else {
977 m_data->copyToDataPoint(-1, 0,value);
978 }
979 }
980
981 const
982 boost::python::numeric::array
983 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
984 {
985 size_t length=0;
986 int i, j, k, l, pos;
987 FORCERESOLVE;
988 //
989 // determine the rank and shape of each data point
990 int dataPointRank = getDataPointRank();
991 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
992
993 //
994 // create the numeric array to be returned
995 boost::python::numeric::array numArray(0.0);
996
997 //
998 // the shape of the returned numeric array will be the same
999 // as that of the data point
1000 int arrayRank = dataPointRank;
1001 const DataTypes::ShapeType& arrayShape = dataPointShape;
1002
1003 //
1004 // resize the numeric array to the shape just calculated
1005 if (arrayRank==0) {
1006 numArray.resize(1);
1007 }
1008 if (arrayRank==1) {
1009 numArray.resize(arrayShape[0]);
1010 }
1011 if (arrayRank==2) {
1012 numArray.resize(arrayShape[0],arrayShape[1]);
1013 }
1014 if (arrayRank==3) {
1015 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2]);
1016 }
1017 if (arrayRank==4) {
1018 numArray.resize(arrayShape[0],arrayShape[1],arrayShape[2],arrayShape[3]);
1019 }
1020
1021 // added for the MPI communication
1022 length=1;
1023 for( i=0; i<arrayRank; i++ ) length *= arrayShape[i];
1024 double *tmpData = new double[length];
1025
1026 //
1027 // load the values for the data point into the numeric array.
1028
1029 // updated for the MPI case
1030 if( get_MPIRank()==procNo ){
1031 if (getNumDataPointsPerSample()>0) {
1032 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1033 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1034 //
1035 // Check a valid sample number has been supplied
1036 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1037 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid sampleNo.");
1038 }
1039
1040 //
1041 // Check a valid data point number has been supplied
1042 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1043 throw DataException("Error - Data::getValueOfGlobalDataPoint: invalid dataPointNoInSample.");
1044 }
1045 // TODO: global error handling
1046 // create a view of the data if it is stored locally
1047 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1048 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1049
1050 // pack the data from the view into tmpData for MPI communication
1051 pos=0;
1052 switch( dataPointRank ){
1053 case 0 :
1054 tmpData[0] = getDataAtOffset(offset);
1055 break;
1056 case 1 :
1057 for( i=0; i<dataPointShape[0]; i++ )
1058 tmpData[i]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i));
1059 break;
1060 case 2 :
1061 for( i=0; i<dataPointShape[0]; i++ )
1062 for( j=0; j<dataPointShape[1]; j++, pos++ )
1063 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j));
1064 break;
1065 case 3 :
1066 for( i=0; i<dataPointShape[0]; i++ )
1067 for( j=0; j<dataPointShape[1]; j++ )
1068 for( k=0; k<dataPointShape[2]; k++, pos++ )
1069 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k));
1070 break;
1071 case 4 :
1072 for( i=0; i<dataPointShape[0]; i++ )
1073 for( j=0; j<dataPointShape[1]; j++ )
1074 for( k=0; k<dataPointShape[2]; k++ )
1075 for( l=0; l<dataPointShape[3]; l++, pos++ )
1076 tmpData[pos]=getDataAtOffset(offset+DataTypes::getRelIndex(dataPointShape, i,j,k,l));
1077 break;
1078 }
1079 }
1080 }
1081 #ifdef PASO_MPI
1082 // broadcast the data to all other processes
1083 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1084 #endif
1085
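// The unpack below addresses tmpData with the first index varying fastest
// (column-major), the same convention as the index formulas in integrateWorker.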
1086 // unpack the data
1087 switch( dataPointRank ){
1088 case 0 :
1089 numArray[0]=tmpData[0];
1090 break;
1091 case 1 :
1092 for( i=0; i<dataPointShape[0]; i++ )
1093 numArray[i]=tmpData[i];
1094 break;
1095 case 2 :
1096 for( i=0; i<dataPointShape[0]; i++ )
1097 for( j=0; j<dataPointShape[1]; j++ )
1098 numArray[make_tuple(i,j)]=tmpData[i+j*dataPointShape[0]];
1099 break;
1100 case 3 :
1101 for( i=0; i<dataPointShape[0]; i++ )
1102 for( j=0; j<dataPointShape[1]; j++ )
1103 for( k=0; k<dataPointShape[2]; k++ )
1104 numArray[make_tuple(i,j,k)]=tmpData[i+dataPointShape[0]*(j+k*dataPointShape[1])];
1105 break;
1106 case 4 :
1107 for( i=0; i<dataPointShape[0]; i++ )
1108 for( j=0; j<dataPointShape[1]; j++ )
1109 for( k=0; k<dataPointShape[2]; k++ )
1110 for( l=0; l<dataPointShape[3]; l++ )
1111 numArray[make_tuple(i,j,k,l)]=tmpData[i+dataPointShape[0]*(j+dataPointShape[1]*(k+l*dataPointShape[2]))];
1112 break;
1113 }
1114
1115 delete [] tmpData;
1116 //
1117 // return the loaded array
1118 return numArray;
1119 }
1120
1121
1122 boost::python::numeric::array
1123 Data::integrate_const() const
1124 {
1125 if (isLazy())
1126 {
1127 throw DataException("Error - cannot integrate for constant lazy data.");
1128 }
1129 return integrateWorker();
1130 }
1131
1132 boost::python::numeric::array
1133 Data::integrate()
1134 {
1135 if (isLazy())
1136 {
1137 expand();
1138 }
1139 return integrateWorker();
1140 }
1141
1142
1143
1144 boost::python::numeric::array
1145 Data::integrateWorker() const
1146 {
1147 int index;
1148 int rank = getDataPointRank();
1149 DataTypes::ShapeType shape = getDataPointShape();
1150 int dataPointSize = getDataPointSize();
1151
1152 //
1153 // calculate the integral values
1154 vector<double> integrals(dataPointSize);
1155 vector<double> integrals_local(dataPointSize);
1156 #ifdef PASO_MPI
1157 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals_local,*this);
1158 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1159 double *tmp = new double[dataPointSize];
1160 double *tmp_local = new double[dataPointSize];
1161 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1162 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1163 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1164 delete[] tmp;
1165 delete[] tmp_local;
1166 #else
1167 AbstractContinuousDomain::asAbstractContinuousDomain(*getDomain()).setToIntegrals(integrals,*this);
1168 #endif
1169
1170 //
1171 // create the numeric array to be returned
1172 // and load the array with the integral values
1173 boost::python::numeric::array bp_array(1.0);
1174 if (rank==0) {
1175 bp_array.resize(1);
1176 index = 0;
1177 bp_array[0] = integrals[index];
1178 }
1179 if (rank==1) {
1180 bp_array.resize(shape[0]);
1181 for (int i=0; i<shape[0]; i++) {
1182 index = i;
1183 bp_array[i] = integrals[index];
1184 }
1185 }
1186 if (rank==2) {
1187 bp_array.resize(shape[0],shape[1]);
1188 for (int i=0; i<shape[0]; i++) {
1189 for (int j=0; j<shape[1]; j++) {
1190 index = i + shape[0] * j;
1191 bp_array[make_tuple(i,j)] = integrals[index];
1192 }
1193 }
1194 }
1195 if (rank==3) {
1196 bp_array.resize(shape[0],shape[1],shape[2]);
1197 for (int i=0; i<shape[0]; i++) {
1198 for (int j=0; j<shape[1]; j++) {
1199 for (int k=0; k<shape[2]; k++) {
1200 index = i + shape[0] * ( j + shape[1] * k );
1201 bp_array[make_tuple(i,j,k)] = integrals[index];
1202 }
1203 }
1204 }
1205 }
1206 if (rank==4) {
1207 bp_array.resize(shape[0],shape[1],shape[2],shape[3]);
1208 for (int i=0; i<shape[0]; i++) {
1209 for (int j=0; j<shape[1]; j++) {
1210 for (int k=0; k<shape[2]; k++) {
1211 for (int l=0; l<shape[3]; l++) {
1212 index = i + shape[0] * ( j + shape[1] * ( k + shape[2] * l ) );
1213 bp_array[make_tuple(i,j,k,l)] = integrals[index];
1214 }
1215 }
1216 }
1217 }
1218 }
1219
1220 //
1221 // return the loaded array
1222 return bp_array;
1223 }
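// The flattening used above is column-major: for rank 4,
// index = i + shape[0]*(j + shape[1]*(k + shape[2]*l)).
// Worked example: for shape (2,3,2,2) the entry (1,2,0,1) lands at
// 1 + 2*(2 + 3*(0 + 2*1)) = 17.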
1224
1225 Data
1226 Data::sin() const
1227 {
1228 if (isLazy())
1229 {
1230 DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
1231 return Data(c);
1232 }
1233 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1234 }
1235
1236 Data
1237 Data::cos() const
1238 {
1239 if (isLazy())
1240 {
1241 DataLazy* c=new DataLazy(borrowDataPtr(),COS);
1242 return Data(c);
1243 }
1244 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1245 }
1246
1247 Data
1248 Data::tan() const
1249 {
1250 if (isLazy())
1251 {
1252 DataLazy* c=new DataLazy(borrowDataPtr(),TAN);
1253 return Data(c);
1254 }
1255 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1256 }
1257
1258 Data
1259 Data::asin() const
1260 {
1261 if (isLazy())
1262 {
1263 DataLazy* c=new DataLazy(borrowDataPtr(),ASIN);
1264 return Data(c);
1265 }
1266 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1267 }
1268
1269 Data
1270 Data::acos() const
1271 {
1272 if (isLazy())
1273 {
1274 DataLazy* c=new DataLazy(borrowDataPtr(),ACOS);
1275 return Data(c);
1276 }
1277 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1278 }
1279
1280
1281 Data
1282 Data::atan() const
1283 {
1284 if (isLazy())
1285 {
1286 DataLazy* c=new DataLazy(borrowDataPtr(),ATAN);
1287 return Data(c);
1288 }
1289 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1290 }
1291
1292 Data
1293 Data::sinh() const
1294 {
1295 if (isLazy())
1296 {
1297 DataLazy* c=new DataLazy(borrowDataPtr(),SINH);
1298 return Data(c);
1299 }
1300 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1301 }
1302
1303 Data
1304 Data::cosh() const
1305 {
1306 if (isLazy())
1307 {
1308 DataLazy* c=new DataLazy(borrowDataPtr(),COSH);
1309 return Data(c);
1310 }
1311 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1312 }
1313
1314 Data
1315 Data::tanh() const
1316 {
1317 if (isLazy())
1318 {
1319 DataLazy* c=new DataLazy(borrowDataPtr(),TANH);
1320 return Data(c);
1321 }
1322 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1323 }
1324
1325
1326 Data
1327 Data::erf() const
1328 {
1329 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1330 throw DataException("Error - Data::erf function is not supported on _WIN32 platforms.");
1331 #else
1332 if (isLazy())
1333 {
1334 DataLazy* c=new DataLazy(borrowDataPtr(),ERF);
1335 return Data(c);
1336 }
1337 return C_TensorUnaryOperation(*this, ::erf);
1338 #endif
1339 }
1340
1341 Data
1342 Data::asinh() const
1343 {
1344 if (isLazy())
1345 {
1346 DataLazy* c=new DataLazy(borrowDataPtr(),ASINH);
1347 return Data(c);
1348 }
1349 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1350 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1351 #else
1352 return C_TensorUnaryOperation(*this, ::asinh);
1353 #endif
1354 }
1355
1356 Data
1357 Data::acosh() const
1358 {
1359 if (isLazy())
1360 {
1361 DataLazy* c=new DataLazy(borrowDataPtr(),ACOSH);
1362 return Data(c);
1363 }
1364 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1365 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1366 #else
1367 return C_TensorUnaryOperation(*this, ::acosh);
1368 #endif
1369 }
1370
1371 Data
1372 Data::atanh() const
1373 {
1374 if (isLazy())
1375 {
1376 DataLazy* c=new DataLazy(borrowDataPtr(),ATANH);
1377 return Data(c);
1378 }
1379 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1380 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1381 #else
1382 return C_TensorUnaryOperation(*this, ::atanh);
1383 #endif
1384 }
1385
1386 Data
1387 Data::log10() const
1388 { if (isLazy())
1389 {
1390 DataLazy* c=new DataLazy(borrowDataPtr(),LOG10);
1391 return Data(c);
1392 }
1393 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1394 }
1395
1396 Data
1397 Data::log() const
1398 {
1399 if (isLazy())
1400 {
1401 DataLazy* c=new DataLazy(borrowDataPtr(),LOG);
1402 return Data(c);
1403 }
1404 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1405 }
1406
1407 Data
1408 Data::sign() const
1409 {
1410 if (isLazy())
1411 {
1412 DataLazy* c=new DataLazy(borrowDataPtr(),SIGN);
1413 return Data(c);
1414 }
1415 return C_TensorUnaryOperation(*this, escript::fsign);
1416 }
1417
1418 Data
1419 Data::abs() const
1420 {
1421 if (isLazy())
1422 {
1423 DataLazy* c=new DataLazy(borrowDataPtr(),ABS);
1424 return Data(c);
1425 }
1426 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1427 }
1428
1429 Data
1430 Data::neg() const
1431 {
1432 if (isLazy())
1433 {
1434 DataLazy* c=new DataLazy(borrowDataPtr(),NEG);
1435 return Data(c);
1436 }
1437 return C_TensorUnaryOperation(*this, negate<double>());
1438 }
1439
1440 Data
1441 Data::pos() const
1442 {
1443 // not doing lazy check here is deliberate.
1444 // since a deep copy of lazy data should be cheap, I'll just let it happen now
1445 Data result;
1446 // perform a deep copy
1447 result.copy(*this);
1448 return result;
1449 }
1450
1451 Data
1452 Data::exp() const
1453 {
1454 if (isLazy())
1455 {
1456 DataLazy* c=new DataLazy(borrowDataPtr(),EXP);
1457 return Data(c);
1458 }
1459 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1460 }
1461
1462 Data
1463 Data::sqrt() const
1464 {
1465 if (isLazy())
1466 {
1467 DataLazy* c=new DataLazy(borrowDataPtr(),SQRT);
1468 return Data(c);
1469 }
1470 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1471 }
1472
1473 double
1474 Data::Lsup_const() const
1475 {
1476 if (isLazy())
1477 {
1478 throw DataException("Error - cannot compute Lsup for constant lazy data.");
1479 }
1480 return LsupWorker();
1481 }
1482
1483 double
1484 Data::Lsup()
1485 {
1486 if (isLazy())
1487 {
1488 expand();
1489 }
1490 return LsupWorker();
1491 }
1492
1493 double
1494 Data::sup_const() const
1495 {
1496 if (isLazy())
1497 {
1498 throw DataException("Error - cannot compute sup for constant lazy data.");
1499 }
1500 return supWorker();
1501 }
1502
1503 double
1504 Data::sup()
1505 {
1506 if (isLazy())
1507 {
1508 expand();
1509 }
1510 return supWorker();
1511 }
1512
1513 double
1514 Data::inf_const() const
1515 {
1516 if (isLazy())
1517 {
1518 throw DataException("Error - cannot compute inf for constant lazy data.");
1519 }
1520 return infWorker();
1521 }
1522
1523 double
1524 Data::inf()
1525 {
1526 if (isLazy())
1527 {
1528 expand();
1529 }
1530 return infWorker();
1531 }
1532
1533 double
1534 Data::LsupWorker() const
1535 {
1536 double localValue;
1537 //
1538 // set the initial absolute maximum value to zero
1539
1540 AbsMax abs_max_func;
1541 localValue = algorithm(abs_max_func,0);
1542 #ifdef PASO_MPI
1543 double globalValue;
1544 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1545 return globalValue;
1546 #else
1547 return localValue;
1548 #endif
1549 }
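// Note: starting the abs-max reduction at 0 is safe because AbsMax only yields
// non-negative values; under PASO_MPI the per-rank maxima are then combined with MPI_MAX.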
1550
1551 double
1552 Data::supWorker() const
1553 {
1554 double localValue;
1555 //
1556 // set the initial maximum value to min possible double
1557 FMax fmax_func;
1558 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1559 #ifdef PASO_MPI
1560 double globalValue;
1561 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1562 return globalValue;
1563 #else
1564 return localValue;
1565 #endif
1566 }
1567
1568 double
1569 Data::infWorker() const
1570 {
1571 double localValue;
1572 //
1573 // set the initial minimum value to max possible double
1574 FMin fmin_func;
1575 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1576 #ifdef PASO_MPI
1577 double globalValue;
1578 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1579 return globalValue;
1580 #else
1581 return localValue;
1582 #endif
1583 }
1584
1585 /* TODO */
1586 /* global reduction */
1587 Data
1588 Data::maxval() const
1589 {
1590 if (isLazy())
1591 {
1592 Data temp(*this); // to get around the fact that you can't resolve a const Data
1593 temp.resolve();
1594 return temp.maxval();
1595 }
1596 //
1597 // set the initial maximum value to min possible double
1598 FMax fmax_func;
1599 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1600 }
1601
1602 Data
1603 Data::minval() const
1604 {
1605 if (isLazy())
1606 {
1607 Data temp(*this); // to get around the fact that you can't resolve a const Data
1608 temp.resolve();
1609 return temp.minval();
1610 }
1611 //
1612 // set the initial minimum value to max possible double
1613 FMin fmin_func;
1614 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1615 }
1616
1617 Data
1618 Data::swapaxes(const int axis0, const int axis1) const
1619 {
1620 int axis0_tmp,axis1_tmp;
1621 DataTypes::ShapeType s=getDataPointShape();
1622 DataTypes::ShapeType ev_shape;
1623 // Here's the equivalent of the python expression s_out=s[axis_offset:]+s[:axis_offset],
1624 // which goes through all shape vector elements starting at axis_offset (wrapping around to 0 when the index reaches rank)
1625 int rank=getDataPointRank();
1626 if (rank<2) {
1627 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1628 }
1629 if (axis0<0 || axis0>rank-1) {
1630 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
1631 }
1632 if (axis1<0 || axis1>rank-1) {
1633 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1634 }
1635 if (axis0 == axis1) {
1636 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1637 }
1638 if (axis0 > axis1) {
1639 axis0_tmp=axis1;
1640 axis1_tmp=axis0;
1641 } else {
1642 axis0_tmp=axis0;
1643 axis1_tmp=axis1;
1644 }
1645 for (int i=0; i<rank; i++) {
1646 if (i == axis0_tmp) {
1647 ev_shape.push_back(s[axis1_tmp]);
1648 } else if (i == axis1_tmp) {
1649 ev_shape.push_back(s[axis0_tmp]);
1650 } else {
1651 ev_shape.push_back(s[i]);
1652 }
1653 }
1654 Data ev(0.,ev_shape,getFunctionSpace());
1655 ev.typeMatchRight(*this);
1656 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1657 return ev;
1658
1659 }
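// Shape example for swapaxes: a Data object with point shape (2,3,4) and a call to
// swapaxes(0,2) produces point shape (4,3,2); dimensions other than axis0/axis1 are unchanged.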
1660
1661 Data
1662 Data::symmetric() const
1663 {
1664 // check input
1665 DataTypes::ShapeType s=getDataPointShape();
1666 if (getDataPointRank()==2) {
1667 if(s[0] != s[1])
1668 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1669 }
1670 else if (getDataPointRank()==4) {
1671 if(!(s[0] == s[2] && s[1] == s[3]))
1672 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1673 }
1674 else {
1675 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1676 }
1677 if (isLazy())
1678 {
1679 DataLazy* c=new DataLazy(borrowDataPtr(),SYM);
1680 return Data(c);
1681 }
1682 Data ev(0.,getDataPointShape(),getFunctionSpace());
1683 ev.typeMatchRight(*this);
1684 m_data->symmetric(ev.m_data.get());
1685 return ev;
1686 }
1687
1688 Data
1689 Data::nonsymmetric() const
1690 {
1691 if (isLazy())
1692 {
1693 DataLazy* c=new DataLazy(borrowDataPtr(),NSYM);
1694 return Data(c);
1695 }
1696 // check input
1697 DataTypes::ShapeType s=getDataPointShape();
1698 if (getDataPointRank()==2) {
1699 if(s[0] != s[1])
1700 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1701 DataTypes::ShapeType ev_shape;
1702 ev_shape.push_back(s[0]);
1703 ev_shape.push_back(s[1]);
1704 Data ev(0.,ev_shape,getFunctionSpace());
1705 ev.typeMatchRight(*this);
1706 m_data->nonsymmetric(ev.m_data.get());
1707 return ev;
1708 }
1709 else if (getDataPointRank()==4) {
1710 if(!(s[0] == s[2] && s[1] == s[3]))
1711 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1712 DataTypes::ShapeType ev_shape;
1713 ev_shape.push_back(s[0]);
1714 ev_shape.push_back(s[1]);
1715 ev_shape.push_back(s[2]);
1716 ev_shape.push_back(s[3]);
1717 Data ev(0.,ev_shape,getFunctionSpace());
1718 ev.typeMatchRight(*this);
1719 m_data->nonsymmetric(ev.m_data.get());
1720 return ev;
1721 }
1722 else {
1723 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1724 }
1725 }
1726
1727
1728 // Doing a lazy version of this would require some thought.
1729 // First, it needs a parameter (which DataLazy doesn't support at the moment).
1730 // Second (although this does not apply to trace), we can't handle operations which return
1731 // multiple results (like eigenvalues_and_eigenvectors) or whose results have a different shape
1732 // to their input (like eigenvalues).
1733 Data
1734 Data::trace(int axis_offset) const
1735 {
1736 if (isLazy())
1737 {
1738 Data temp(*this); // to get around the fact that you can't resolve a const Data
1739 temp.resolve();
1740 return temp.trace(axis_offset);
1741 }
1742 DataTypes::ShapeType s=getDataPointShape();
1743 if (getDataPointRank()==2) {
1744 DataTypes::ShapeType ev_shape;
1745 Data ev(0.,ev_shape,getFunctionSpace());
1746 ev.typeMatchRight(*this);
1747 m_data->trace(ev.m_data.get(), axis_offset);
1748 return ev;
1749 }
1750 if (getDataPointRank()==3) {
1751 DataTypes::ShapeType ev_shape;
1752 if (axis_offset==0) {
1753 int s2=s[2];
1754 ev_shape.push_back(s2);
1755 }
1756 else if (axis_offset==1) {
1757 int s0=s[0];
1758 ev_shape.push_back(s0);
1759 }
1760 Data ev(0.,ev_shape,getFunctionSpace());
1761 ev.typeMatchRight(*this);
1762 m_data->trace(ev.m_data.get(), axis_offset);
1763 return ev;
1764 }
1765 if (getDataPointRank()==4) {
1766 DataTypes::ShapeType ev_shape;
1767 if (axis_offset==0) {
1768 ev_shape.push_back(s[2]);
1769 ev_shape.push_back(s[3]);
1770 }
1771 else if (axis_offset==1) {
1772 ev_shape.push_back(s[0]);
1773 ev_shape.push_back(s[3]);
1774 }
1775 else if (axis_offset==2) {
1776 ev_shape.push_back(s[0]);
1777 ev_shape.push_back(s[1]);
1778 }
1779 Data ev(0.,ev_shape,getFunctionSpace());
1780 ev.typeMatchRight(*this);
1781 m_data->trace(ev.m_data.get(), axis_offset);
1782 return ev;
1783 }
1784 else {
1785 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1786 }
1787 }
1788
1789 Data
1790 Data::transpose(int axis_offset) const
1791 {
1792 if (isLazy())
1793 {
1794 Data temp(*this); // to get around the fact that you can't resolve a const Data
1795 temp.resolve();
1796 return temp.transpose(axis_offset);
1797 }
1798 DataTypes::ShapeType s=getDataPointShape();
1799 DataTypes::ShapeType ev_shape;
1800 // Here's the equivalent of the python expression s_out=s[axis_offset:]+s[:axis_offset],
1801 // which goes through all shape vector elements starting at axis_offset (wrapping around to 0 when the index reaches rank)
1802 int rank=getDataPointRank();
1803 if (axis_offset<0 || axis_offset>rank) {
1804 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1805 }
1806 for (int i=0; i<rank; i++) {
1807 int index = (axis_offset+i)%rank;
1808 ev_shape.push_back(s[index]); // Append to new shape
1809 }
1810 Data ev(0.,ev_shape,getFunctionSpace());
1811 ev.typeMatchRight(*this);
1812 m_data->transpose(ev.m_data.get(), axis_offset);
1813 return ev;
1814 }
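// Shape example for transpose: with point shape (2,3,4), transpose(1) rotates the
// shape to (3,4,2), i.e. s_out = s[1:] + s[:1] as described in the comment above.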
1815
1816 Data
1817 Data::eigenvalues() const
1818 {
1819 if (isLazy())
1820 {
1821 Data temp(*this); // to get around the fact that you can't resolve a const Data
1822 temp.resolve();
1823 return temp.eigenvalues();
1824 }
1825 // check input
1826 DataTypes::ShapeType s=getDataPointShape();
1827 if (getDataPointRank()!=2)
1828 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1829 if(s[0] != s[1])
1830 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1831 // create return
1832 DataTypes::ShapeType ev_shape(1,s[0]);
1833 Data ev(0.,ev_shape,getFunctionSpace());
1834 ev.typeMatchRight(*this);
1835 m_data->eigenvalues(ev.m_data.get());
1836 return ev;
1837 }
1838
1839 const boost::python::tuple
1840 Data::eigenvalues_and_eigenvectors(const double tol) const
1841 {
1842 if (isLazy())
1843 {
1844 Data temp(*this); // to get around the fact that you can't resolve a const Data
1845 temp.resolve();
1846 return temp.eigenvalues_and_eigenvectors(tol);
1847 }
1848 DataTypes::ShapeType s=getDataPointShape();
1849 if (getDataPointRank()!=2)
1850 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1851 if(s[0] != s[1])
1852 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1853 // create return
1854 DataTypes::ShapeType ev_shape(1,s[0]);
1855 Data ev(0.,ev_shape,getFunctionSpace());
1856 ev.typeMatchRight(*this);
1857 DataTypes::ShapeType V_shape(2,s[0]);
1858 Data V(0.,V_shape,getFunctionSpace());
1859 V.typeMatchRight(*this);
1860 m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1861 return make_tuple(boost::python::object(ev),boost::python::object(V));
1862 }
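// A minimal usage sketch (hypothetical caller code): for a square rank-2 Data m,
//   boost::python::tuple t = m.eigenvalues_and_eigenvectors(1.e-13);
// t[0] holds the eigenvalues (shape (d,)) and t[1] the eigenvectors (shape (d,d)),
// each wrapped as a Data object inside a boost::python::object.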
1863
1864 const boost::python::tuple
1865 Data::minGlobalDataPoint() const
1866 {
1867 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() as boost::make_tuple causes an
1868 // abort (for unknown reasons) if there are OpenMP directives with it in the
1869 // surrounding function
1870
1871 int DataPointNo;
1872 int ProcNo;
1873 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1874 return make_tuple(ProcNo,DataPointNo);
1875 }
1876
1877 void
1878 Data::calc_minGlobalDataPoint(int& ProcNo,
1879 int& DataPointNo) const
1880 {
1881 if (isLazy())
1882 {
1883 Data temp(*this); // to get around the fact that you can't resolve a const Data
1884 temp.resolve();
1885 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1886 }
1887 int i,j;
1888 int lowi=0,lowj=0;
1889 double min=numeric_limits<double>::max();
1890
1891 Data temp=minval();
1892
1893 int numSamples=temp.getNumSamples();
1894 int numDPPSample=temp.getNumDataPointsPerSample();
1895
1896 double next,local_min;
1897 int local_lowi=0,local_lowj=0;
1898
1899 #pragma omp parallel private(next,local_min,local_lowi,local_lowj)
1900 {
1901 local_min=min;
1902 #pragma omp for private(i,j) schedule(static)
1903 for (i=0; i<numSamples; i++) {
1904 for (j=0; j<numDPPSample; j++) {
1905 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1906 if (next<local_min) {
1907 local_min=next;
1908 local_lowi=i;
1909 local_lowj=j;
1910 }
1911 }
1912 }
1913 #pragma omp critical
1914 if (local_min<min) {
1915 min=local_min;
1916 lowi=local_lowi;
1917 lowj=local_lowj;
1918 }
1919 }
1920
1921 #ifdef PASO_MPI
1922 // determine the processor on which the minimum occurs
1923 next = temp.getDataPoint(lowi,lowj);
1924 int lowProc = 0;
1925 double *globalMins = new double[get_MPISize()+1];
1926 int error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1927
1928 if( get_MPIRank()==0 ){
1929 next = globalMins[lowProc];
1930 for( i=1; i<get_MPISize(); i++ )
1931 if( next>globalMins[i] ){
1932 lowProc = i;
1933 next = globalMins[i];
1934 }
1935 }
1936 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );
1937
1938 delete [] globalMins;
1939 ProcNo = lowProc;
1940 #else
1941 ProcNo = 0;
1942 #endif
1943 DataPointNo = lowj + lowi * numDPPSample;
1944 }
1945
1946 void
1947 Data::saveDX(std::string fileName) const
1948 {
1949 if (isEmpty())
1950 {
1951 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1952 }
1953 if (isLazy())
1954 {
1955 Data temp(*this); // to get around the fact that you can't resolve a const Data
1956 temp.resolve();
1957 temp.saveDX(fileName);
1958 return;
1959 }
1960 boost::python::dict args;
1961 args["data"]=boost::python::object(this);
1962 getDomain()->saveDX(fileName,args);
1963 return;
1964 }
1965
1966 void
1967 Data::saveVTK(std::string fileName) const
1968 {
1969 if (isEmpty())
1970 {
1971 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1972 }
1973 if (isLazy())
1974 {
1975 Data temp(*this); // to get around the fact that you can't resolve a const Data
1976 temp.resolve();
1977 temp.saveVTK(fileName);
1978 return;
1979 }
1980 boost::python::dict args;
1981 args["data"]=boost::python::object(this);
1982 getDomain()->saveVTK(fileName,args);
1983 return;
1984 }
1985
1986 Data&
1987 Data::operator+=(const Data& right)
1988 {
1989 if (isProtected()) {
1990 throw DataException("Error - attempt to update protected Data object.");
1991 }
1992 if (isLazy() || right.isLazy())
1993 {
1994 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
1995 m_data=c->getPtr();
1996 return (*this);
1997 }
1998 else
1999 {
2000 binaryOp(right,plus<double>());
2001 return (*this);
2002 }
2003 }
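// Sketch of the lazy path above (hypothetical caller code): if either operand is lazy,
// += does not evaluate anything yet; it wraps both operands in a DataLazy ADD node.
//   Data a(...);
//   a.delaySelf();   // a becomes lazy
//   a += b;          // builds new DataLazy(m_data, right.borrowDataPtr(), ADD)
//   a.resolve();     // evaluation happens here (or whenever values are actually needed)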
2004
2005 Data&
2006 Data::operator+=(const boost::python::object& right)
2007 {
2008 if (isProtected()) {
2009 throw DataException("Error - attempt to update protected Data object.");
2010 }
2011 Data tmp(right,getFunctionSpace(),false);
2012 if (isLazy())
2013 {
2014 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),ADD); // for lazy + is equivalent to +=
2015 m_data=c->getPtr();
2016 return (*this);
2017 }
2018 else
2019 {
2020 binaryOp(tmp,plus<double>());
2021 return (*this);
2022 }
2023 }
2024
2025 // Hmmm, operator= makes a deep copy but the copy constructor does not?
2026 Data&
2027 Data::operator=(const Data& other)
2028 {
2029 copy(other);
2030 return (*this);
2031 }
2032
2033 Data&
2034 Data::operator-=(const Data& right)
2035 {
2036 if (isProtected()) {
2037 throw DataException("Error - attempt to update protected Data object.");
2038 }
2039 if (isLazy() || right.isLazy())
2040 {
2041 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2042 m_data=c->getPtr();
2043 return (*this);
2044 }
2045 else
2046 {
2047 binaryOp(right,minus<double>());
2048 return (*this);
2049 }
2050 }
2051
2052 Data&
2053 Data::operator-=(const boost::python::object& right)
2054 {
2055 if (isProtected()) {
2056 throw DataException("Error - attempt to update protected Data object.");
2057 }
2058 Data tmp(right,getFunctionSpace(),false);
2059 if (isLazy())
2060 {
2061 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),SUB); // for lazy - is equivalent to -=
2062 m_data=c->getPtr();
2063 return (*this);
2064 }
2065 else
2066 {
2067 binaryOp(tmp,minus<double>());
2068 return (*this);
2069 }
2070 }
2071
2072 Data&
2073 Data::operator*=(const Data& right)
2074 {
2075 if (isProtected()) {
2076 throw DataException("Error - attempt to update protected Data object.");
2077 }
2078 if (isLazy() || right.isLazy())
2079 {
2080 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2081 m_data=c->getPtr();
2082 return (*this);
2083 }
2084 else
2085 {
2086 binaryOp(right,multiplies<double>());
2087 return (*this);
2088 }
2089 }
2090
2091 Data&
2092 Data::operator*=(const boost::python::object& right)
2093 {
2094 if (isProtected()) {
2095 throw DataException("Error - attempt to update protected Data object.");
2096 }
2097 Data tmp(right,getFunctionSpace(),false);
2098 if (isLazy())
2099 {
2100 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),MUL); // for lazy * is equivalent to *=
2101 m_data=c->getPtr();
2102 return (*this);
2103 }
2104 else
2105 {
2106 binaryOp(tmp,multiplies<double>());
2107 return (*this);
2108 }
2109 }
2110
2111 Data&
2112 Data::operator/=(const Data& right)
2113 {
2114 if (isProtected()) {
2115 throw DataException("Error - attempt to update protected Data object.");
2116 }
2117 if (isLazy() || right.isLazy())
2118 {
2119 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2120 m_data=c->getPtr();
2121 return (*this);
2122 }
2123 else
2124 {
2125 binaryOp(right,divides<double>());
2126 return (*this);
2127 }
2128 }
2129
2130 Data&
2131 Data::operator/=(const boost::python::object& right)
2132 {
2133 if (isProtected()) {
2134 throw DataException("Error - attempt to update protected Data object.");
2135 }
2136 Data tmp(right,getFunctionSpace(),false);
2137 if (isLazy())
2138 {
2139 DataLazy* c=new DataLazy(m_data,tmp.borrowDataPtr(),DIV); // for lazy / is equivalent to /=
2140 m_data=c->getPtr();
2141 return (*this);
2142 }
2143 else
2144 {
2145 binaryOp(tmp,divides<double>());
2146 return (*this);
2147 }
2148 }
2149
2150 Data
2151 Data::rpowO(const boost::python::object& left) const
2152 {
2153 Data left_d(left,*this);
2154 return left_d.powD(*this);
2155 }
2156
2157 Data
2158 Data::powO(const boost::python::object& right) const
2159 {
2160 Data tmp(right,getFunctionSpace(),false);
2161 return powD(tmp);
2162 }
2163
2164 Data
2165 Data::powD(const Data& right) const
2166 {
2167 if (isLazy() || right.isLazy())
2168 {
2169 DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),POW);
2170 return Data(c);
2171 }
2172 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2173 }
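// Pointwise power sketch (illustrative; fs is any FunctionSpace, the values are assumptions):
//   Data a(2.0, DataTypes::ShapeType(), fs, false);
//   Data b(3.0, DataTypes::ShapeType(), fs, false);
//   Data c = a.powD(b);   // each data point of c holds pow(2.0, 3.0) == 8.0
// rpowO(x) evaluates x**(*this), presumably backing the reflected python power operator.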
2174
2175 //
2176 // NOTE: It is essential to specify the namespace this operator belongs to
2177 Data
2178 escript::operator+(const Data& left, const Data& right)
2179 {
2180 if (left.isLazy() || right.isLazy())
2181 {
2182 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),ADD);
2183 return Data(c);
2184 }
2185 return C_TensorBinaryOperation(left, right, plus<double>());
2186 }
2187
2188 //
2189 // NOTE: It is essential to specify the namespace this operator belongs to
2190 Data
2191 escript::operator-(const Data& left, const Data& right)
2192 {
2193 if (left.isLazy() || right.isLazy())
2194 {
2195 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),SUB);
2196 return Data(c);
2197 }
2198 return C_TensorBinaryOperation(left, right, minus<double>());
2199 }
2200
2201 //
2202 // NOTE: It is essential to specify the namespace this operator belongs to
2203 Data
2204 escript::operator*(const Data& left, const Data& right)
2205 {
2206 if (left.isLazy() || right.isLazy())
2207 {
2208 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),MUL);
2209 return Data(c);
2210 }
2211 return C_TensorBinaryOperation(left, right, multiplies<double>());
2212 }
2213
2214 //
2215 // NOTE: It is essential to specify the namespace this operator belongs to
2216 Data
2217 escript::operator/(const Data& left, const Data& right)
2218 {
2219 if (left.isLazy() || right.isLazy())
2220 {
2221 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),DIV);
2222 return Data(c);
2223 }
2224 return C_TensorBinaryOperation(left, right, divides<double>());
2225 }
2226
2227 //
2228 // NOTE: It is essential to specify the namespace this operator belongs to
2229 Data
2230 escript::operator+(const Data& left, const boost::python::object& right)
2231 {
2232 if (left.isLazy())
2233 {
2234 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),ADD);
2235 return Data(c);
2236 }
2237 return left+Data(right,left.getFunctionSpace(),false);
2238 }
2239
2240 //
2241 // NOTE: It is essential to specify the namespace this operator belongs to
2242 Data
2243 escript::operator-(const Data& left, const boost::python::object& right)
2244 {
2245 if (left.isLazy())
2246 {
2247 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),SUB);
2248 return Data(c);
2249 }
2250 return left-Data(right,left.getFunctionSpace(),false);
2251 }
2252
2253 //
2254 // NOTE: It is essential to specify the namespace this operator belongs to
2255 Data
2256 escript::operator*(const Data& left, const boost::python::object& right)
2257 {
2258 if (left.isLazy())
2259 {
2260 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),MUL);
2261 return Data(c);
2262 }
2263 return left*Data(right,left.getFunctionSpace(),false);
2264 }
2265
2266 //
2267 // NOTE: It is essential to specify the namespace this operator belongs to
2268 Data
2269 escript::operator/(const Data& left, const boost::python::object& right)
2270 {
2271 if (left.isLazy())
2272 {
2273 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),DIV);
2274 return Data(c);
2275 }
2276 return left/Data(right,left.getFunctionSpace(),false);
2277 }
2278
2279 //
2280 // NOTE: It is essential to specify the namespace this operator belongs to
2281 Data
2282 escript::operator+(const boost::python::object& left, const Data& right)
2283 {
2284 if (right.isLazy())
2285 {
2286 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),ADD);
2287 return Data(c);
2288 }
2289 return Data(left,right.getFunctionSpace(),false)+right;
2290 }
2291
2292 //
2293 // NOTE: It is essential to specify the namespace this operator belongs to
2294 Data
2295 escript::operator-(const boost::python::object& left, const Data& right)
2296 {
2297 if (right.isLazy())
2298 {
2299 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),SUB);
2300 return Data(c);
2301 }
2302 return Data(left,right.getFunctionSpace(),false)-right;
2303 }
2304
2305 //
2306 // NOTE: It is essential to specify the namespace this operator belongs to
2307 Data
2308 escript::operator*(const boost::python::object& left, const Data& right)
2309 {
2310 if (right.isLazy())
2311 {
2312 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),MUL);
2313 return Data(c);
2314 }
2315 return Data(left,right.getFunctionSpace(),false)*right;
2316 }
2317
2318 //
2319 // NOTE: It is essential to specify the namespace this operator belongs to
2320 Data
2321 escript::operator/(const boost::python::object& left, const Data& right)
2322 {
2323 if (right.isLazy())
2324 {
2325 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),DIV);
2326 return Data(c);
2327 }
2328 return Data(left,right.getFunctionSpace(),false)/right;
2329 }
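// Overload-set sketch (illustrative): python operands are first wrapped in a Data on the
// other operand's FunctionSpace, so with "using namespace escript":
//   Data w = u + v;                               // Data op Data
//   Data x = u * boost::python::object(2.0);      // Data op python object (scalar)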
2330
2331
2332 /* TODO */
2333 /* global reduction */
2334 Data
2335 Data::getItem(const boost::python::object& key) const
2336 {
2337
2338 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2339
2340 if (slice_region.size()!=getDataPointRank()) {
2341 throw DataException("Error - slice size does not match Data rank.");
2342 }
2343
2344 return getSlice(slice_region);
2345 }
2346
2347 /* TODO */
2348 /* global reduction */
2349 Data
2350 Data::getSlice(const DataTypes::RegionType& region) const
2351 {
2352 return Data(*this,region);
2353 }
2354
2355 /* TODO */
2356 /* global reduction */
2357 void
2358 Data::setItemO(const boost::python::object& key,
2359 const boost::python::object& value)
2360 {
2361 Data tempData(value,getFunctionSpace());
2362 setItemD(key,tempData);
2363 }
2364
2365 void
2366 Data::setItemD(const boost::python::object& key,
2367 const Data& value)
2368 {
2369 // const DataArrayView& view=getPointDataView();
2370
2371 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2372 if (slice_region.size()!=getDataPointRank()) {
2373 throw DataException("Error - slice size does not match Data rank.");
2374 }
2375 if (getFunctionSpace()!=value.getFunctionSpace()) {
2376 setSlice(Data(value,getFunctionSpace()),slice_region);
2377 } else {
2378 setSlice(value,slice_region);
2379 }
2380 }
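// Slicing sketch (illustrative; the python binding of these methods is an assumption):
// the key is converted to a RegionType via DataTypes::getSliceRegion, so on the python side
//   v = u[0]        # -> Data::getItem, a slice of u
//   u[0] = 1.0      # -> Data::setItemO, which writes through Data::setSlice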
2381
2382 void
2383 Data::setSlice(const Data& value,
2384 const DataTypes::RegionType& region)
2385 {
2386 if (isProtected()) {
2387 throw DataException("Error - attempt to update protected Data object.");
2388 }
2389 FORCERESOLVE;
2390 /* if (isLazy())
2391 {
2392 throw DataException("Error - setSlice not permitted on lazy data.");
2393 }*/
2394 Data tempValue(value);
2395 typeMatchLeft(tempValue);
2396 typeMatchRight(tempValue);
2397 getReady()->setSlice(tempValue.m_data.get(),region);
2398 }
2399
2400 void
2401 Data::typeMatchLeft(Data& right) const
2402 {
2403 if (right.isLazy() && !isLazy())
2404 {
2405 right.resolve();
2406 }
2407 if (isExpanded()){
2408 right.expand();
2409 } else if (isTagged()) {
2410 if (right.isConstant()) {
2411 right.tag();
2412 }
2413 }
2414 }
2415
2416 void
2417 Data::typeMatchRight(const Data& right)
2418 {
2419 if (isLazy() && !right.isLazy())
2420 {
2421 resolve();
2422 }
2423 if (isTagged()) {
2424 if (right.isExpanded()) {
2425 expand();
2426 }
2427 } else if (isConstant()) {
2428 if (right.isExpanded()) {
2429 expand();
2430 } else if (right.isTagged()) {
2431 tag();
2432 }
2433 }
2434 }
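// Promotion sketch: typeMatchLeft/typeMatchRight move the operands up the ordering
//   DataConstant -> DataTagged -> DataExpanded   (lazy operands are resolved first),
// so by the time setSlice reaches the underlying objects the representations match;
// e.g. a tagged target with a constant value tags the value, while an expanded target
// with a tagged value expands it.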
2435
2436 void
2437 Data::setTaggedValueByName(std::string name,
2438 const boost::python::object& value)
2439 {
2440 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2441 FORCERESOLVE;
2442 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2443 setTaggedValue(tagKey,value);
2444 }
2445 }
2446 void
2447 Data::setTaggedValue(int tagKey,
2448 const boost::python::object& value)
2449 {
2450 if (isProtected()) {
2451 throw DataException("Error - attempt to update protected Data object.");
2452 }
2453 //
2454 // Ensure underlying data object is of type DataTagged
2455 FORCERESOLVE;
2456 if (isConstant()) tag();
2457 numeric::array asNumArray(value);
2458
2459 // extract the shape of the numarray
2460 DataTypes::ShapeType tempShape;
2461 for (int i=0; i < asNumArray.getrank(); i++) {
2462 tempShape.push_back(extract<int>(asNumArray.getshape()[i]));
2463 }
2464
2465 DataVector temp_data2;
2466 temp_data2.copyFromNumArray(asNumArray);
2467
2468 m_data->setTaggedValue(tagKey,tempShape, temp_data2);
2469 }
2470
2471
2472 void
2473 Data::setTaggedValueFromCPP(int tagKey,
2474 const DataTypes::ShapeType& pointshape,
2475 const DataTypes::ValueType& value,
2476 int dataOffset)
2477 {
2478 if (isProtected()) {
2479 throw DataException("Error - attempt to update protected Data object.");
2480 }
2481 //
2482 // Ensure underlying data object is of type DataTagged
2483 FORCERESOLVE;
2484 if (isConstant()) tag();
2485 //
2486 // Call DataAbstract::setTaggedValue
2487 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2488 }
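// Tagging sketch (illustrative; tag key 1 and the value are assumptions, and
// DataTypes::ValueType is taken to be the DataVector used elsewhere in this file):
//   DataTypes::ShapeType shp;                 // empty shape -> scalar data points
//   DataTypes::ValueType val(1, 42.0, 1);     // one value for the tagged points
//   u.setTaggedValueFromCPP(1, shp, val, 0);  // points carrying tag 1 now read 42.0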
2489
2490 int
2491 Data::getTagNumber(int dpno)
2492 {
2493 if (isEmpty())
2494 {
2495 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2496 }
2497 return getFunctionSpace().getTagFromDataPointNo(dpno);
2498 }
2499
2500
2501 ostream& escript::operator<<(ostream& o, const Data& data)
2502 {
2503 o << data.toString();
2504 return o;
2505 }
2506
2507 Data
2508 escript::C_GeneralTensorProduct(Data& arg_0,
2509 Data& arg_1,
2510 int axis_offset,
2511 int transpose)
2512 {
2513 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2514 // SM is the product of the last axis_offset entries in arg_0.getShape().
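// Worked example (illustrative): with transpose==0 and axis_offset==1,
// arg_0 of shape (3,4) and arg_1 of shape (4,5) give SL=3, SM=4, SR=5 and a
// result of shape (3,5), i.e. the ordinary matrix product
//   res[i][k] = sum_j arg_0[i][j] * arg_1[j][k].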
2515
2516 // deal with any lazy data
2517 if (arg_0.isLazy()) {arg_0.resolve();}
2518 if (arg_1.isLazy()) {arg_1.resolve();}
2519
2520 // Interpolate if necessary and find an appropriate function space
2521 Data arg_0_Z, arg_1_Z;
2522 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2523 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2524 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2525 arg_1_Z = Data(arg_1);
2526 }
2527 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2528 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2529 arg_0_Z =Data(arg_0);
2530 }
2531 else {
2532 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2533 }
2534 } else {
2535 arg_0_Z = Data(arg_0);
2536 arg_1_Z = Data(arg_1);
2537 }
2538 // Get rank and shape of inputs
2539 int rank0 = arg_0_Z.getDataPointRank();
2540 int rank1 = arg_1_Z.getDataPointRank();
2541 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2542 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2543
2544 // Prepare for the loops of the product and verify compatibility of shapes
2545 int start0=0, start1=0;
2546 if (transpose == 0) {}
2547 else if (transpose == 1) { start0 = axis_offset; }
2548 else if (transpose == 2) { start1 = rank1-axis_offset; }
2549 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2550
2551
2552 // Adjust the shapes for transpose
2553 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2554 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2555 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2556 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
2557
2558 #if 0
2559 // For debugging: show shape after transpose
2560 char tmp[100];
2561 std::string shapeStr;
2562 shapeStr = "(";
2563 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2564 shapeStr += ")";
2565 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2566 shapeStr = "(";
2567 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2568 shapeStr += ")";
2569 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2570 #endif
2571
2572 // Prepare for the loops of the product
2573 int SL=1, SM=1, SR=1;
2574 for (int i=0; i<rank0-axis_offset; i++) {
2575 SL *= tmpShape0[i];
2576 }
2577 for (int i=rank0-axis_offset; i<rank0; i++) {
2578 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2579 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2580 }
2581 SM *= tmpShape0[i];
2582 }
2583 for (int i=axis_offset; i<rank1; i++) {
2584 SR *= tmpShape1[i];
2585 }
2586
2587 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2588 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2589 { // block to limit the scope of out_index
2590 int out_index=0;
2591 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2592 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2593 }
2594
2595 // Declare output Data object
2596 Data res;
2597
2598 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2599 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2600 double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
2601 double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
2602 double *ptr_2 = &(res.getDataAtOffset(0));
2603 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2604 }
2605 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2606
2607 // Prepare the DataConstant input
2608 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2609 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2610
2611 // Borrow DataTagged input from Data object
2612 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2613 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2614
2615 // Prepare a DataTagged output 2
2616 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2617 res.tag();
2618 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2619 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2620
2621 // Prepare offset into DataConstant
2622 int offset_0 = tmp_0->getPointOffset(0,0);
2623 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2624 // Get the views
2625 // DataArrayView view_1 = tmp_1->getDefaultValue();
2626 // DataArrayView view_2 = tmp_2->getDefaultValue();
2627 // // Get the pointers to the actual data
2628 // double *ptr_1 = &((view_1.getData())[0]);
2629 // double *ptr_2 = &((view_2.getData())[0]);
2630
2631 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2632 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2633
2634
2635 // Compute a matrix product for the default value
2636 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2637 // Compute a matrix product for each tag
2638 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2639 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2640 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2641 tmp_2->addTag(i->first);
2642 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2643 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2644 // double *ptr_1 = &view_1.getData(0);
2645 // double *ptr_2 = &view_2.getData(0);
2646
2647 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2648 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2649
2650 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2651 }
2652
2653 }
2654 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2655
2656 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2657 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2658 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2659 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2660 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2661 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2662 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2663 int sampleNo_1,dataPointNo_1;
2664 int numSamples_1 = arg_1_Z.getNumSamples();
2665 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2666 int offset_0 = tmp_0->getPointOffset(0,0);
2667 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2668 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2669 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2670 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2671 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2672 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2673 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2674 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2675 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2676 }
2677 }
2678
2679 }
2680 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2681
2682 // Borrow DataTagged input from Data object
2683 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2684 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2685
2686 // Prepare the DataConstant input
2687 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2688 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2689
2690 // Prepare a DataTagged output 2
2691 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2692 res.tag();
2693 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2694 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2695
2696 // Prepare offset into DataConstant
2697 int offset_1 = tmp_1->getPointOffset(0,0);
2698 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2699 // Get the views
2700 // DataArrayView view_0 = tmp_0->getDefaultValue();
2701 // DataArrayView view_2 = tmp_2->getDefaultValue();
2702 // // Get the pointers to the actual data
2703 // double *ptr_0 = &((view_0.getData())[0]);
2704 // double *ptr_2 = &((view_2.getData())[0]);
2705
2706 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2707 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2708
2709 // Compute a matrix product for the default value
2710 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2711 // Compute a matrix product for each tag
2712 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2713 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2714 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2715 // tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
2716 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2717 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2718 // double *ptr_0 = &view_0.getData(0);
2719 // double *ptr_2 = &view_2.getData(0);
2720
2721 tmp_2->addTag(i->first);
2722 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2723 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2724 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2725 }
2726
2727 }
2728 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2729
2730 // Borrow DataTagged input from Data object
2731 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2732 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2733
2734 // Borrow DataTagged input from Data object
2735 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2736 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2737
2738 // Prepare a DataTagged output 2
2739 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2740 res.tag(); // DataTagged output
2741 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2742 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2743
2744 // // Get the views
2745 // DataArrayView view_0 = tmp_0->getDefaultValue();
2746 // DataArrayView view_1 = tmp_1->getDefaultValue();
2747 // DataArrayView view_2 = tmp_2->getDefaultValue();
2748 // // Get the pointers to the actual data
2749 // double *ptr_0 = &((view_0.getData())[0]);
2750 // double *ptr_1 = &((view_1.getData())[0]);
2751 // double *ptr_2 = &((view_2.getData())[0]);
2752
2753 double *ptr_0 = &(tmp_0->getDefaultValue(0));
2754 double *ptr_1 = &(tmp_1->getDefaultValue(0));
2755 double *ptr_2 = &(tmp_2->getDefaultValue(0));
2756
2757
2758 // Compute a matrix product for the default value
2759 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2760 // Merge the tags
2761 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2762 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2763 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2764 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2765 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2766 }
2767 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2768 tmp_2->addTag(i->first);
2769 }
2770 // Compute a matrix product for each tag
2771 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2772 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2773 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2774 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2775 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2776 // double *ptr_0 = &view_0.getData(0);
2777 // double *ptr_1 = &view_1.getData(0);
2778 // double *ptr_2 = &view_2.getData(0);
2779
2780 double *ptr_0 = &(tmp_0->getDataByTag(i->first,0));
2781 double *ptr_1 = &(tmp_1->getDataByTag(i->first,0));
2782 double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
2783
2784 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2785 }
2786
2787 }
2788 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2789
2790 // After interpolation to a common function space above, the two inputs have the same numSamples and number of data points per sample
2791 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2792 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2793 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2794 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2795 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2796 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2797 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2798 int sampleNo_0,dataPointNo_0;
2799 int numSamples_0 = arg_0_Z.getNumSamples();
2800 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2801 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2802 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2803 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2804 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2805 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2806 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2807 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2808 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2809 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2810 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2811 }
2812 }
2813
2814 }
2815 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2816
2817 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2818 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2819 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2820 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2821 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2822 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2823 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2824 int sampleNo_0,dataPointNo_0;
2825 int numSamples_0 = arg_0_Z.getNumSamples();
2826 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2827 int offset_1 = tmp_1->getPointOffset(0,0);
2828 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2829 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2830 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2831 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2832 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2833 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2834 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2835 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2836 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2837 }
2838 }
2839
2840
2841 }
2842 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2843
2844 // After interpolation to a common function space above, the two inputs have the same numSamples and number of data points per sample
2845 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2846 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2847 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2848 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2849 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2850 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2851 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2852 int sampleNo_0,dataPointNo_0;
2853 int numSamples_0 = arg_0_Z.getNumSamples();
2854 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2855 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2856 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2857 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2858 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2859 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2860 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2861 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2862 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2863 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2864 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2865 }
2866 }
2867
2868 }
2869 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2870
2871 // After interpolation to a common function space above, the two inputs have the same numSamples and number of data points per sample
2872 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2873 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2874 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2875 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2876 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2877 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2878 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2879 int sampleNo_0,dataPointNo_0;
2880 int numSamples_0 = arg_0_Z.getNumSamples();
2881 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2882 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2883 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2884 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2885 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2886 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2887 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2888 double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
2889 double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
2890 double *ptr_2 = &(res.getDataAtOffset(offset_2));
2891 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2892 }
2893 }
2894
2895 }
2896 else {
2897 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2898 }
2899
2900 return res;
2901 }
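// Call sketch (illustrative): contracting one index pair of two rank-2 operands,
//   Data C = C_GeneralTensorProduct(A, B, 1, 0);   // C[i][k] = sum_j A[i][j] * B[j][k]
// The arguments are non-const references because lazy inputs are resolved in place.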
2902
2903 DataAbstract*
2904 Data::borrowData() const
2905 {
2906 return m_data.get();
2907 }
2908
2909 // Note: returns a non-const pointer from a const method; callers must not use it to modify this object
2910 DataAbstract_ptr
2911 Data::borrowDataPtr() const
2912 {
2913 return m_data;
2914 }
2915
2916 // Note: returns a non-const pointer from a const method; callers must not use it to modify this object
2917 DataReady_ptr
2918 Data::borrowReadyPtr() const
2919 {
2920 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2921 EsysAssert((dr!=0), "Error - casting to DataReady.");
2922 return dr;
2923 }
2924
2925 std::string
2926 Data::toString() const
2927 {
2928 if (!m_data->isEmpty() &&
2929 getNumDataPoints()*getDataPointSize()>escriptParams.getInt("TOO_MANY_LINES"))
2930 {
2931 stringstream temp;
2932 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2933 return temp.str();
2934 }
2935 return m_data->toString();
2936 }
2937
2938
2939
2940 DataTypes::ValueType::const_reference
2941 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2942 {
2943 if (isLazy())
2944 {
2945 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2946 }
2947 return getReady()->getDataAtOffset(i);
2948 }
2949
2950
2951 DataTypes::ValueType::reference
2952 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2953 {
2954 // if (isLazy())
2955 // {
2956 // throw DataException("getDataAtOffset not permitted on lazy data.");
2957 // }
2958 FORCERESOLVE;
2959 return getReady()->getDataAtOffset(i);
2960 }
2961
2962 DataTypes::ValueType::const_reference
2963 Data::getDataPoint(int sampleNo, int dataPointNo) const
2964 {
2965 if (!isReady())
2966 {
2967 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2968 }
2969 else
2970 {
2971 const DataReady* dr=getReady();
2972 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2973 }
2974 }
2975
2976
2977 DataTypes::ValueType::reference
2978 Data::getDataPoint(int sampleNo, int dataPointNo)
2979 {
2980 FORCERESOLVE;
2981 if (!isReady())
2982 {
2983 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2984 }
2985 else
2986 {
2987 DataReady* dr=getReady();
2988 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2989 }
2990 }
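// Access sketch (illustrative): for ready (non-lazy) data the two accessors agree,
//   double v1 = d.getDataPoint(sampleNo, dataPointNo);                        // first component of that data point
//   double v2 = d.getDataAtOffset(d.getDataOffset(sampleNo, dataPointNo));    // same value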
2991
2992
2993 /* Member functions specific to the MPI implementation */
2994
2995 void
2996 Data::print()
2997 {
2998 int i,j;
2999
3000 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
3001 for( i=0; i<getNumSamples(); i++ )
3002 {
3003 printf( "[%6d]", i );
3004 for( j=0; j<getNumDataPointsPerSample(); j++ )
3005 printf( "\t%10.7g", (getSampleData(i))[j] );
3006 printf( "\n" );
3007 }
3008 }
3009 void
3010 Data::dump(const std::string fileName) const
3011 {
3012 try
3013 {
3014 if (isLazy())
3015 {
3016 Data temp(*this); // this is to get a non-const object which we can resolve
3017 temp.resolve();
3018 temp.dump(fileName);
3019 }
3020 else
3021 {
3022 return m_data->dump(fileName);
3023 }
3024 }
3025 catch (exception& e)
3026 {
3027 cout << e.what() << endl;
3028 }
3029 }
3030
3031 int
3032 Data::get_MPISize() const
3033 {
3034 int size;
3035 #ifdef PASO_MPI
3036 int error;
3037 error = MPI_Comm_size( get_MPIComm(), &size );
3038 #else
3039 size = 1;
3040 #endif
3041 return size;
3042 }
3043
3044 int
3045 Data::get_MPIRank() const
3046 {
3047 int rank;
3048 #ifdef PASO_MPI
3049 int error;
3050 error = MPI_Comm_rank( get_MPIComm(), &rank );
3051 #else
3052 rank = 0;
3053 #endif
3054 return rank;
3055 }
3056
3057 MPI_Comm
3058 Data::get_MPIComm() const
3059 {
3060 #ifdef PASO_MPI
3061 return MPI_COMM_WORLD;
3062 #else
3063 return -1;
3064 #endif
3065 }
3066
3067
