/[escript]/branches/arrexp_2137_win/escript/src/Data.cpp
ViewVC logotype

Contents of /branches/arrexp_2137_win/escript/src/Data.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2212 - (show annotations)
Wed Jan 14 00:15:00 2009 UTC (10 years, 3 months ago) by jfenwick
File size: 80727 byte(s)
Executive summary:

This commit adds copy on write checks to operations involving shared data. 

Changes:

new #defines:
~~~~~~~~~~~~~
Data.cpp has ASSIGNMENT_MEANS_DEEPCOPY (defaults to undefined).
Defining this will put the data = operator back to making deep copies instead
of sharing data (now the default.)

Data:
~~~~~
. Added exclusiveWrite method to copy the underlying data if it is shared.
. Some operators which took python objects now call the c++ versions instead of duplicating code.

DataAbstract and offspring:
~~~~~~~~~~~~~~~~~~~~~~~~~~~
. Added method to determine whether the data is currently shared.
. Added getVectorRO to children of DataReady.
. Added getTagRO.

. Operations which modify values in place (or return modifiable pointers) now use
a macro to check for sharing. In the case where a modification attempt is detected, it throws an exception. In the future, I will enable this only for debugging.

. This should not really have been required but the compiler was not choosing to use the const version as I would have liked. Besides, this makes things explicit.

. Moved (and de-inlined) getVector in DataConstant (It was virtual in a parent class).

Unit tests:
~~~~~~~~~~~
Added both python and c++ unit tests to check known cases of sharing and "inplace"
modification operations.

General:
~~~~~~~~
Removed some commented out code.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41
42 #include "WrappedArray.h"
43
44 using namespace std;
45 using namespace boost::python;
46 using namespace boost;
47 using namespace escript;
48
49 // ensure the current object is not a DataLazy
50 // The idea was that we could add an optional warning whenever a resolve is forced
51 #define FORCERESOLVE if (isLazy()) {resolve();}
52
53 // Do not use the following unless you want to make copies on assignment rather than
54 // share data. CopyOnWrite should make this unnecessary.
55 // #define ASSIGNMENT_MEANS_DEEPCOPY
56
namespace
{

// Convert a single data point into a (possibly nested) boost::python tuple.
// 'shape' gives the point shape (rank 0..4 supported); 'v' is any indexable
// array-like (raw double* or a vector) holding at least
// DataTypes::noValues(shape) values laid out according to
// DataTypes::getRelIndex.  Throws DataException for ranks above 4.
// This should be safer once the DataC RO changes have been brought in
template <class ARR>
boost::python::tuple
pointToTuple( const DataTypes::ShapeType& shape,ARR v)
{
   using namespace boost::python;
   using boost::python::list;
   int rank=shape.size();
   if (rank==0)
   {
        // scalar: a 1-tuple holding the single value
	return make_tuple(v[0]);
   }
   else if (rank==1)
   {
	list l;
	for (size_t i=0;i<shape[0];++i)
	{
	   l.append(v[i]);
	}
	return tuple(l);
   }
   else if (rank==2)
   {
        // build row tuples, then a tuple of rows
	list lj;
	for (size_t j=0;j<shape[0];++j)
	{
	   list li;
	   for (size_t i=0;i<shape[1];++i)
	   {
	      li.append(v[DataTypes::getRelIndex(shape,j,i)]);
	   }
	   lj.append(tuple(li));
	}
	return tuple(lj);
   }
   else if (rank==3)
   {
	list lk;
	for (size_t k=0;k<shape[0];++k)
	{
	   list lj;
	   for (size_t j=0;j<shape[1];++j)
	   {
	      list li;
	      for (size_t i=0;i<shape[2];++i)
	      {
		 li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
	      }
	      lj.append(tuple(li));
	   }
	   lk.append(tuple(lj));
	}
	return tuple(lk);
   }
   else if (rank==4)
   {
	list ll;
	for (size_t l=0;l<shape[0];++l)
	{
	   list lk;
	   for (size_t k=0;k<shape[1];++k)
	   {
	      list lj;
	      for (size_t j=0;j<shape[2];++j)
	      {
		 list li;
		 for (size_t i=0;i<shape[3];++i)
		 {
		    li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
		 }
		 lj.append(tuple(li));
	      }
	      lk.append(tuple(lj));
	   }
	   ll.append(tuple(lk));
	}
	return tuple(ll);
   }
   else
     throw DataException("Unknown rank in pointToTuple.");
}

}  // anonymous namespace
143
144 Data::Data()
145 {
146 //
147 // Default data is type DataEmpty
148 DataAbstract* temp=new DataEmpty();
149 m_data=temp->getPtr();
150 m_protected=false;
151 }
152
153 Data::Data(double value,
154 const tuple& shape,
155 const FunctionSpace& what,
156 bool expanded)
157 {
158 DataTypes::ShapeType dataPointShape;
159 for (int i = 0; i < shape.attr("__len__")(); ++i) {
160 dataPointShape.push_back(extract<const int>(shape[i]));
161 }
162
163 int len = DataTypes::noValues(dataPointShape);
164 DataVector temp_data(len,value,len);
165 initialise(temp_data, dataPointShape, what, expanded);
166 m_protected=false;
167 }
168
169 Data::Data(double value,
170 const DataTypes::ShapeType& dataPointShape,
171 const FunctionSpace& what,
172 bool expanded)
173 {
174 int len = DataTypes::noValues(dataPointShape);
175
176 DataVector temp_data(len,value,len);
177 // DataArrayView temp_dataView(temp_data, dataPointShape);
178
179 // initialise(temp_dataView, what, expanded);
180 initialise(temp_data, dataPointShape, what, expanded);
181
182 m_protected=false;
183 }
184
185 Data::Data(const Data& inData)
186 {
187 m_data=inData.m_data;
188 m_protected=inData.isProtected();
189 }
190
191
192 Data::Data(const Data& inData,
193 const DataTypes::RegionType& region)
194 {
195 DataAbstract_ptr dat=inData.m_data;
196 if (inData.isLazy())
197 {
198 dat=inData.m_data->resolve();
199 }
200 else
201 {
202 dat=inData.m_data;
203 }
204 //
205 // Create Data which is a slice of another Data
206 DataAbstract* tmp = dat->getSlice(region);
207 m_data=DataAbstract_ptr(tmp);
208 m_protected=false;
209 }
210
Data::Data(const Data& inData,
	   const FunctionSpace& functionspace)
{
  // Construct a copy of inData interpolated onto 'functionspace'.
  if (inData.isEmpty())
  {
    throw DataException("Error - will not interpolate for instances of DataEmpty.");
  }
  if (inData.getFunctionSpace()==functionspace) {
    // Same function space: just share the payload, no interpolation needed.
    m_data=inData.m_data;
  }
  else
  {

    if (inData.isConstant()) {	// for a constant function, we just need to use the new function space
      if (!inData.probeInterpolation(functionspace))
      {           // Even though this is constant, we still need to check whether interpolation is allowed
	throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
      }
      // if the data is not lazy, this will just be a cast to DataReady
      DataReady_ptr dr=inData.m_data->resolve();
      DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
      m_data=DataAbstract_ptr(dc);
    } else {
      // General case: build an expanded zero Data on the target space and
      // ask the source domain to fill it by interpolation.
      Data tmp(0,inData.getDataPointShape(),functionspace,true);
      // Note: Must use a reference or pointer to a derived object
      // in order to get polymorphic behaviour. Shouldn't really
      // be able to create an instance of AbstractDomain but that was done
      // as a boost:python work around which may no longer be required.
      const_Domain_ptr inDataDomain=inData.getDomain();
      if (inDataDomain==functionspace.getDomain()) {
	inDataDomain->interpolateOnDomain(tmp,inData);
      } else {
	inDataDomain->interpolateACross(tmp,inData);
      }
      m_data=tmp.m_data;
    }
  }
  m_protected=false;
}
251
252 Data::Data(DataAbstract* underlyingdata)
253 {
254 // m_data=shared_ptr<DataAbstract>(underlyingdata);
255 m_data=underlyingdata->getPtr();
256 m_protected=false;
257 }
258
259 Data::Data(DataAbstract_ptr underlyingdata)
260 {
261 m_data=underlyingdata;
262 m_protected=false;
263 }
264
265
266 // Data::Data(const numeric::array& value,
267 // const FunctionSpace& what,
268 // bool expanded)
269 // {
270 // initialise(value,what,expanded);
271 // m_protected=false;
272 // }
273
274 Data::Data(const DataTypes::ValueType& value,
275 const DataTypes::ShapeType& shape,
276 const FunctionSpace& what,
277 bool expanded)
278 {
279 initialise(value,shape,what,expanded);
280 m_protected=false;
281 }
282
283
284 Data::Data(const object& value,
285 const FunctionSpace& what,
286 bool expanded)
287 {
288 //numeric::array asNumArray(value);
289 WrappedArray w(value);
290 initialise(w,what,expanded);
291 m_protected=false;
292 }
293
294
Data::Data(const object& value,
	   const Data& other)
{
  // Build a DataConstant from a python object, taking the function space
  // from 'other' (and, for scalar input, also other's data point shape).
  WrappedArray w(value);

  // extract the shape of the array
  const DataTypes::ShapeType& tempShape=w.getShape();
  if (w.getRank()==0) {

    // Scalar input: replicate the single value over other's point shape.
    // get the space for the data vector
    int len1 = DataTypes::noValues(tempShape);
    DataVector temp_data(len1, 0.0, len1);
    temp_data.copyFromArray(w);

    int len = DataTypes::noValues(other.getDataPointShape());

    DataVector temp2_data(len, temp_data[0], len);
    DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
    m_data=DataAbstract_ptr(t);

  } else {
    //
    // Non-scalar input keeps its own shape; only the function space comes
    // from 'other'.
    DataConstant* t=new DataConstant(w,other.getFunctionSpace());
    m_data=DataAbstract_ptr(t);
  }
  m_protected=false;
}
324
325 Data::~Data()
326 {
327
328 }
329
330
331 void Data::initialise(const WrappedArray& value,
332 const FunctionSpace& what,
333 bool expanded)
334 {
335 //
336 // Construct a Data object of the appropriate type.
337 // Construct the object first as there seems to be a bug which causes
338 // undefined behaviour if an exception is thrown during construction
339 // within the shared_ptr constructor.
340 if (expanded) {
341 DataAbstract* temp=new DataExpanded(value, what);
342 m_data=temp->getPtr();
343 } else {
344 DataAbstract* temp=new DataConstant(value, what);
345 m_data=temp->getPtr();
346 }
347 }
348
349
350 void
351 Data::initialise(const DataTypes::ValueType& value,
352 const DataTypes::ShapeType& shape,
353 const FunctionSpace& what,
354 bool expanded)
355 {
356 //
357 // Construct a Data object of the appropriate type.
358 // Construct the object first as there seems to be a bug which causes
359 // undefined behaviour if an exception is thrown during construction
360 // within the shared_ptr constructor.
361 if (expanded) {
362 DataAbstract* temp=new DataExpanded(what, shape, value);
363 // boost::shared_ptr<DataAbstract> temp_data(temp);
364 // m_data=temp_data;
365 m_data=temp->getPtr();
366 } else {
367 DataAbstract* temp=new DataConstant(what, shape, value);
368 // boost::shared_ptr<DataAbstract> temp_data(temp);
369 // m_data=temp_data;
370 m_data=temp->getPtr();
371 }
372 }
373
374
375 // void
376 // Data::CompareDebug(const Data& rd)
377 // {
378 // using namespace std;
379 // bool mismatch=false;
380 // std::cout << "Comparing left and right" << endl;
381 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
382 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
383 //
384 // if (left==0)
385 // {
386 // cout << "left arg is not a DataTagged\n";
387 // return;
388 // }
389 //
390 // if (right==0)
391 // {
392 // cout << "right arg is not a DataTagged\n";
393 // return;
394 // }
395 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
396 // cout << "Shapes ";
397 // if (left->getShape()==right->getShape())
398 // {
399 // cout << "ok\n";
400 // }
401 // else
402 // {
403 // cout << "Problem: shapes do not match\n";
404 // mismatch=true;
405 // }
406 // int lim=left->getVector().size();
407 // if (right->getVector().size()) lim=right->getVector().size();
408 // for (int i=0;i<lim;++i)
409 // {
410 // if (left->getVector()[i]!=right->getVector()[i])
411 // {
412 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
413 // mismatch=true;
414 // }
415 // }
416 //
417 // // still need to check the tag map
418 // // also need to watch what is happening to function spaces, are they copied or what?
419 //
420 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
421 // const DataTagged::DataMapType& mapright=right->getTagLookup();
422 //
423 // if (mapleft.size()!=mapright.size())
424 // {
425 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
426 // mismatch=true;
427 // cout << "Left map\n";
428 // DataTagged::DataMapType::const_iterator i,j;
429 // for (i=mapleft.begin();i!=mapleft.end();++i) {
430 // cout << "(" << i->first << "=>" << i->second << ")\n";
431 // }
432 // cout << "Right map\n";
433 // for (i=mapright.begin();i!=mapright.end();++i) {
434 // cout << "(" << i->first << "=>" << i->second << ")\n";
435 // }
436 // cout << "End map\n";
437 //
438 // }
439 //
440 // DataTagged::DataMapType::const_iterator i,j;
441 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
442 // if ((i->first!=j->first) || (i->second!=j->second))
443 // {
444 // cout << "(" << i->first << "=>" << i->second << ")";
445 // cout << ":(" << j->first << "=>" << j->second << ") ";
446 // mismatch=true;
447 // }
448 // }
449 // if (mismatch)
450 // {
451 // cout << "#Mismatch\n";
452 // }
453 // }
454
455 escriptDataC
456 Data::getDataC()
457 {
458 escriptDataC temp;
459 temp.m_dataPtr=(void*)this;
460 return temp;
461 }
462
escriptDataC
Data::getDataC() const
{
  // Wrap this object in the struct used by the C interface.
  // NOTE(review): the cast below discards constness; the C side is trusted
  // not to modify the object through this pointer — confirm with the DataC
  // RO changes mentioned above.
  escriptDataC temp;
  temp.m_dataPtr=(void*)this;
  return temp;
}
470
471 size_t
472 Data::getSampleBufferSize() const
473 {
474 return m_data->getSampleBufferSize();
475 }
476
477
478 const boost::python::tuple
479 Data::getShapeTuple() const
480 {
481 const DataTypes::ShapeType& shape=getDataPointShape();
482 switch(getDataPointRank()) {
483 case 0:
484 return make_tuple();
485 case 1:
486 return make_tuple(long_(shape[0]));
487 case 2:
488 return make_tuple(long_(shape[0]),long_(shape[1]));
489 case 3:
490 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
491 case 4:
492 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
493 default:
494 throw DataException("Error - illegal Data rank.");
495 }
496 }
497
498
499 // The different name is needed because boost has trouble with overloaded functions.
500 // It can't work out what type the function is based solely on its name.
501 // There are ways to fix this involving creating function pointer variables for each form
502 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
503 Data*
504 Data::copySelf()
505 {
506 DataAbstract* temp=m_data->deepCopy();
507 return new Data(temp);
508 }
509
510 void
511 Data::copy(const Data& other)
512 {
513 DataAbstract* temp=other.m_data->deepCopy();
514 DataAbstract_ptr p=temp->getPtr();
515 m_data=p;
516 }
517
518
519 Data
520 Data::delay()
521 {
522 DataLazy* dl=new DataLazy(m_data);
523 return Data(dl);
524 }
525
526 void
527 Data::delaySelf()
528 {
529 if (!isLazy())
530 {
531 m_data=(new DataLazy(m_data))->getPtr();
532 }
533 }
534
535 void
536 Data::setToZero()
537 {
538 if (isEmpty())
539 {
540 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
541 }
542 exclusiveWrite();
543 m_data->setToZero();
544 }
545
546 void
547 Data::copyWithMask(const Data& other,
548 const Data& mask)
549 {
550 // 1. Interpolate if required so all Datas use the same FS as this
551 // 2. Tag or Expand so that all Data's are the same type
552 // 3. Iterate over the data vectors copying values where mask is >0
553 if (other.isEmpty() || mask.isEmpty())
554 {
555 throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
556 }
557 Data other2(other);
558 Data mask2(mask);
559 other2.resolve();
560 mask2.resolve();
561 this->resolve();
562 FunctionSpace myFS=getFunctionSpace();
563 FunctionSpace oFS=other2.getFunctionSpace();
564 FunctionSpace mFS=mask2.getFunctionSpace();
565 if (oFS!=myFS)
566 {
567 if (other2.probeInterpolation(myFS))
568 {
569 other2=other2.interpolate(myFS);
570 }
571 else
572 {
573 throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
574 }
575 }
576 if (mFS!=myFS)
577 {
578 if (mask2.probeInterpolation(myFS))
579 {
580 mask2=mask2.interpolate(myFS);
581 }
582 else
583 {
584 throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
585 }
586 }
587 // Ensure that all args have the same type
588 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
589 {
590 this->expand();
591 other2.expand();
592 mask2.expand();
593 }
594 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
595 {
596 this->tag();
597 other2.tag();
598 mask2.tag();
599 }
600 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
601 {
602 }
603 else
604 {
605 throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
606 }
607 // Now we iterate over the elements
608 DataVector& self=getReady()->getVector();
609 const DataVector& ovec=other2.getReady()->getVectorRO();
610 const DataVector& mvec=mask2.getReady()->getVectorRO();
611 if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
612 {
613 throw DataException("Error - size mismatch in arguments to copyWithMask.");
614 }
615 size_t num_points=self.size();
616
617 // OPENMP 3.0 allows unsigned loop vars.
618 #if defined(_OPENMP) && (_OPENMP < 200805)
619 long i;
620 #else
621 size_t i;
622 #endif
623 #pragma omp parallel for private(i) schedule(static)
624 for (i=0;i<num_points;++i)
625 {
626 if (mvec[i]>0)
627 {
628 self[i]=ovec[i];
629 }
630 }
631 }
632
633
634
635 bool
636 Data::isExpanded() const
637 {
638 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
639 return (temp!=0);
640 }
641
642 bool
643 Data::actsExpanded() const
644 {
645 return m_data->actsExpanded();
646
647 }
648
649
650 bool
651 Data::isTagged() const
652 {
653 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
654 return (temp!=0);
655 }
656
657 bool
658 Data::isEmpty() const
659 {
660 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
661 return (temp!=0);
662 }
663
664 bool
665 Data::isConstant() const
666 {
667 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
668 return (temp!=0);
669 }
670
671 bool
672 Data::isLazy() const
673 {
674 return m_data->isLazy();
675 }
676
677 // at the moment this is synonymous with !isLazy() but that could change
678 bool
679 Data::isReady() const
680 {
681 return (dynamic_cast<DataReady*>(m_data.get())!=0);
682 }
683
684
685 void
686 Data::setProtection()
687 {
688 m_protected=true;
689 }
690
691 bool
692 Data::isProtected() const
693 {
694 return m_protected;
695 }
696
697
698
void
Data::expand()
{
  // Convert the payload to DataExpanded in place.
  // Constant and tagged data are promoted, expanded data is left alone,
  // lazy data is resolved first, and DataEmpty cannot be expanded.
  if (isConstant()) {
    DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataConst);
    m_data=temp->getPtr();
  } else if (isTagged()) {
    DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataTag);
    m_data=temp->getPtr();
  } else if (isExpanded()) {
    //
    // do nothing
  } else if (isEmpty()) {
    throw DataException("Error - Expansion of DataEmpty not possible.");
  } else if (isLazy()) {
    resolve();
    expand();	// resolve might not give us expanded data
  } else {
    throw DataException("Error - Expansion not implemented for this Data type.");
  }
}
726
727 void
728 Data::tag()
729 {
730 if (isConstant()) {
731 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
732 DataAbstract* temp=new DataTagged(*tempDataConst);
733 // shared_ptr<DataAbstract> temp_data(temp);
734 // m_data=temp_data;
735 m_data=temp->getPtr();
736 } else if (isTagged()) {
737 // do nothing
738 } else if (isExpanded()) {
739 throw DataException("Error - Creating tag data from DataExpanded not possible.");
740 } else if (isEmpty()) {
741 throw DataException("Error - Creating tag data from DataEmpty not possible.");
742 } else if (isLazy()) {
743 DataAbstract_ptr res=m_data->resolve();
744 if (m_data->isExpanded())
745 {
746 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
747 }
748 m_data=res;
749 tag();
750 } else {
751 throw DataException("Error - Tagging not implemented for this Data type.");
752 }
753 }
754
755 void
756 Data::resolve()
757 {
758 if (isLazy())
759 {
760 m_data=m_data->resolve();
761 }
762 }
763
764
765 Data
766 Data::oneOver() const
767 {
768 if (isLazy())
769 {
770 DataLazy* c=new DataLazy(borrowDataPtr(),RECIP);
771 return Data(c);
772 }
773 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
774 }
775
776 Data
777 Data::wherePositive() const
778 {
779 if (isLazy())
780 {
781 DataLazy* c=new DataLazy(borrowDataPtr(),GZ);
782 return Data(c);
783 }
784 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
785 }
786
787 Data
788 Data::whereNegative() const
789 {
790 if (isLazy())
791 {
792 DataLazy* c=new DataLazy(borrowDataPtr(),LZ);
793 return Data(c);
794 }
795 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
796 }
797
798 Data
799 Data::whereNonNegative() const
800 {
801 if (isLazy())
802 {
803 DataLazy* c=new DataLazy(borrowDataPtr(),GEZ);
804 return Data(c);
805 }
806 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
807 }
808
809 Data
810 Data::whereNonPositive() const
811 {
812 if (isLazy())
813 {
814 DataLazy* c=new DataLazy(borrowDataPtr(),LEZ);
815 return Data(c);
816 }
817 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
818 }
819
820 Data
821 Data::whereZero(double tol) const
822 {
823 Data dataAbs=abs();
824 return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
825 }
826
827 Data
828 Data::whereNonZero(double tol) const
829 {
830 Data dataAbs=abs();
831 return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
832 }
833
834 Data
835 Data::interpolate(const FunctionSpace& functionspace) const
836 {
837 return Data(*this,functionspace);
838 }
839
840 bool
841 Data::probeInterpolation(const FunctionSpace& functionspace) const
842 {
843 return getFunctionSpace().probeInterpolation(functionspace);
844 // if (getFunctionSpace()==functionspace) {
845 // return true;
846 // } else {
847 // const_Domain_ptr domain=getDomain();
848 // if (*domain==*functionspace.getDomain()) {
849 // return domain->probeInterpolationOnDomain(getFunctionSpace().getTypeCode(),functionspace.getTypeCode());
850 // } else {
851 // return domain->probeInterpolationACross(getFunctionSpace().getTypeCode(),*(functionspace.getDomain()),functionspace.getTypeCode());
852 // }
853 // }
854 }
855
Data
Data::gradOn(const FunctionSpace& functionspace) const
{
  // Gradient of this Data evaluated on the given FunctionSpace.
  // The result gains one trailing dimension of size functionspace.getDim().
  if (isEmpty())
  {
	throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  double blocktimer_start = blocktimer_time();
  if (functionspace.getDomain()!=getDomain())
    throw DataException("Error - gradient cannot be calculated on different domains.");
  DataTypes::ShapeType grad_shape=getDataPointShape();
  grad_shape.push_back(functionspace.getDim());
  // expanded zero output for the domain to fill in
  Data out(0.0,grad_shape,functionspace,true);
  getDomain()->setToGradient(out,*this);
  blocktimer_increment("grad()", blocktimer_start);
  return out;
}
873
874 Data
875 Data::grad() const
876 {
877 if (isEmpty())
878 {
879 throw DataException("Error - operation not permitted on instances of DataEmpty.");
880 }
881 return gradOn(escript::function(*getDomain()));
882 }
883
884 int
885 Data::getDataPointSize() const
886 {
887 return m_data->getNoValues();
888 }
889
890 DataTypes::ValueType::size_type
891 Data::getLength() const
892 {
893 return m_data->getLength();
894 }
895
896 const
897 boost::python::numeric :: array
898 Data:: getValueOfDataPoint(int dataPointNo)
899 {
900 return boost::python::numeric::array(getValueOfDataPointAsTuple(dataPointNo));
901 }
902
const boost::python::object
Data::getValueOfDataPointAsTuple(int dataPointNo)
{
  // Return one data point as a (possibly nested) python tuple.
  // dataPointNo is a global index; it is split into a sample number and a
  // point-within-sample below.
  FORCERESOLVE   // lazy data cannot be indexed directly
  if (getNumDataPointsPerSample()>0) {
    int sampleNo = dataPointNo/getNumDataPointsPerSample();
    int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
    //
    // Check a valid sample number has been supplied
    if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
	throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
    }

    //
    // Check a valid data point number has been supplied
    if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
	throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
    }
    // TODO: global error handling
    DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
    return pointToTuple(getDataPointShape(),&(getDataAtOffset(offset)));
  }
  else
  {
    // The old method would return an empty numArray of the given shape
    // I'm going to throw an exception because if we have zero points per sample we have problems
    throw DataException("Error - need at least 1 datapoint per sample.");
  }
}
934
935
936 void
937 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
938 {
939 // this will throw if the value cannot be represented
940 setValueOfDataPointToArray(dataPointNo,py_object);
941 }
942
943 void
944 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
945 {
946 if (isProtected()) {
947 throw DataException("Error - attempt to update protected Data object.");
948 }
949 FORCERESOLVE;
950
951 WrappedArray w(obj);
952 //
953 // check rank
954 if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
955 throw DataException("Rank of numarray does not match Data object rank");
956
957 //
958 // check shape of num_array
959 for (unsigned int i=0; i<getDataPointRank(); i++) {
960 if (w.getShape()[i]!=getDataPointShape()[i])
961 throw DataException("Shape of numarray does not match Data object rank");
962 }
963 //
964 // make sure data is expanded:
965 //
966 if (!isExpanded()) {
967 expand();
968 }
969 if (getNumDataPointsPerSample()>0) {
970 int sampleNo = dataPointNo/getNumDataPointsPerSample();
971 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
972 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
973 } else {
974 m_data->copyToDataPoint(-1, 0,w);
975 }
976 }
977
978 void
979 Data::setValueOfDataPoint(int dataPointNo, const double value)
980 {
981 if (isProtected()) {
982 throw DataException("Error - attempt to update protected Data object.");
983 }
984 //
985 // make sure data is expanded:
986 FORCERESOLVE;
987 if (!isExpanded()) {
988 expand();
989 }
990 if (getNumDataPointsPerSample()>0) {
991 int sampleNo = dataPointNo/getNumDataPointsPerSample();
992 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
993 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
994 } else {
995 m_data->copyToDataPoint(-1, 0,value);
996 }
997 }
998
999 const
1000 boost::python::numeric::array
1001 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
1002 {
1003 return boost::python::numeric::array(getValueOfGlobalDataPointAsTuple(procNo, dataPointNo));
1004 }
1005
1006 const
1007 boost::python::object
1008 Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
1009 {
1010 // This could be lazier than it is now
1011 FORCERESOLVE;
1012
1013 // copy datapoint into a buffer
1014 // broadcast buffer to all nodes
1015 // convert buffer to tuple
1016 // return tuple
1017
1018 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
1019 size_t length=DataTypes::noValues(dataPointShape);
1020
1021 // added for the MPI communication
1022 double *tmpData = new double[length];
1023
1024 // updated for the MPI case
1025 if( get_MPIRank()==procNo ){
1026 if (getNumDataPointsPerSample()>0) {
1027 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1028 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1029 //
1030 // Check a valid sample number has been supplied
1031 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1032 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1033 }
1034
1035 //
1036 // Check a valid data point number has been supplied
1037 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1038 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1039 }
1040 // TODO: global error handling
1041 // create a view of the data if it is stored locally
1042 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1043 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1044
1045 memcpy(tmpData,&(getDataAtOffset(offset)),length*sizeof(double));
1046 }
1047 }
1048 #ifdef PASO_MPI
1049 // broadcast the data to all other processes
1050 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1051 #endif
1052
1053 boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
1054 delete [] tmpData;
1055 //
1056 // return the loaded array
1057 return t;
1058
1059 }
1060
1061
1062 boost::python::object
1063 Data::integrateToTuple_const() const
1064 {
1065 if (isLazy())
1066 {
1067 throw DataException("Error - cannot integrate for constant lazy data.");
1068 }
1069 return integrateWorker();
1070 }
1071
1072 boost::python::object
1073 Data::integrateToTuple()
1074 {
1075 if (isLazy())
1076 {
1077 expand();
1078 }
1079 return integrateWorker();
1080
1081 }
1082
1083
1084 boost::python::object
1085 Data::integrate_const() const
1086 {
1087 if (isLazy())
1088 {
1089 throw DataException("Error - cannot integrate for constant lazy data.");
1090 }
1091 return boost::python::numeric::array(integrateWorker());
1092 }
1093
1094 boost::python::object
1095 Data::integrate()
1096 {
1097 if (isLazy())
1098 {
1099 expand();
1100 }
1101 return boost::python::numeric::array(integrateWorker());
1102 }
1103
1104
1105
boost::python::object
Data::integrateWorker() const
{
  // Integrate each component over the domain and return the per-component
  // results as a (possibly nested) python tuple with the data point shape.
  DataTypes::ShapeType shape = getDataPointShape();
  int dataPointSize = getDataPointSize();

  //
  // calculate the integral values
  vector<double> integrals(dataPointSize);
  vector<double> integrals_local(dataPointSize);
  const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
  if (dom==0)
  {
    throw DataException("Can not integrate over non-continuous domains.");
  }
#ifdef PASO_MPI
  // Each rank integrates locally, then the per-component sums are combined
  // with MPI_Allreduce.
  dom->setToIntegrals(integrals_local,*this);
  // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
  double *tmp = new double[dataPointSize];
  double *tmp_local = new double[dataPointSize];
  for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
  MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
  for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
  tuple result=pointToTuple(shape,tmp);
  delete[] tmp;
  delete[] tmp_local;
#else
  dom->setToIntegrals(integrals,*this);
  tuple result=pointToTuple(shape,integrals);
#endif

  return result;
}
1143
// Element-wise unary operations.
// Each follows the same pattern: if the data is lazy, record the
// operation as a new DataLazy node (deferred evaluation); otherwise
// apply the corresponding C library function to every value now.
// Element-wise sine.
Data
Data::sin() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

// Element-wise cosine.
Data
Data::cos() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),COS);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

// Element-wise tangent.
Data
Data::tan() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),TAN);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

// Element-wise arcsine.
Data
Data::asin() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ASIN);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

// Element-wise arccosine.
Data
Data::acos() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ACOS);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}


// Element-wise arctangent.
Data
Data::atan() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ATAN);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

// Element-wise hyperbolic sine.
Data
Data::sinh() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),SINH);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

// Element-wise hyperbolic cosine.
Data
Data::cosh() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),COSH);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

// Element-wise hyperbolic tangent.
Data
Data::tanh() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),TANH);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}
1243
1244
// Element-wise error function.
// erf() is a C99/POSIX function absent from the MSVC maths library,
// hence unsupported on plain _WIN32 builds (the Intel compiler
// provides it).
Data
Data::erf() const
{
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ERF);
	return Data(c);
  }
  return C_TensorUnaryOperation(*this, ::erf);
#endif
}

// Element-wise inverse hyperbolic sine.
// On _WIN32 (non-Intel) the C99 ::asinh is unavailable, so an
// escript-provided substitute implementation is used instead.
Data
Data::asinh() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ASINH);
	return Data(c);
  }
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::asinh);
#endif
}

// Element-wise inverse hyperbolic cosine (substitute on _WIN32, as above).
Data
Data::acosh() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ACOSH);
	return Data(c);
  }
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::acosh);
#endif
}

// Element-wise inverse hyperbolic tangent (substitute on _WIN32, as above).
Data
Data::atanh() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ATANH);
	return Data(c);
  }
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::atanh);
#endif
}
1304
// Element-wise base-10 logarithm.
Data
Data::log10() const
{ if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),LOG10);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}

// Element-wise natural logarithm.
Data
Data::log() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),LOG);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}

// Element-wise sign (-1, 0 or +1 via escript::fsign).
Data
Data::sign() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),SIGN);
	return Data(c);
  }
  return C_TensorUnaryOperation(*this, escript::fsign);
}

// Element-wise absolute value.
Data
Data::abs() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),ABS);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}

// Element-wise negation (std::negate functor).
Data
Data::neg() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),NEG);
	return Data(c);
  }
  return C_TensorUnaryOperation(*this, negate<double>());
}

// Unary plus: returns a deep copy of this object.
Data
Data::pos() const
{
  // not doing lazy check here is deliberate.
  // since a deep copy of lazy data should be cheap, I'll just let it happen now
  Data result;
  // perform a deep copy
  result.copy(*this);
  return result;
}

// Element-wise exponential.
Data
Data::exp() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),EXP);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}

// Element-wise square root.
Data
Data::sqrt() const
{
  if (isLazy())
  {
	DataLazy* c=new DataLazy(borrowDataPtr(),SQRT);
	return Data(c);
  }
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}
1391
// Scalar reduction entry points.  Each has a const variant (rejects
// lazy data, since resolve() is non-const) and a non-const variant
// (resolves lazy data first), both delegating to a *Worker method.

// Maximum absolute value over all components (const variant).
double
Data::Lsup_const() const
{
  if (isLazy())
  {
	throw DataException("Error - cannot compute Lsup for constant lazy data.");
  }
  return LsupWorker();
}

// Maximum absolute value over all components; resolves lazy data.
double
Data::Lsup()
{
  if (isLazy())
  {
	resolve();
  }
  return LsupWorker();
}

// Maximum value over all components (const variant).
double
Data::sup_const() const
{
  if (isLazy())
  {
	throw DataException("Error - cannot compute sup for constant lazy data.");
  }
  return supWorker();
}

// Maximum value over all components; resolves lazy data.
double
Data::sup()
{
  if (isLazy())
  {
	resolve();
  }
  return supWorker();
}

// Minimum value over all components (const variant).
double
Data::inf_const() const
{
  if (isLazy())
  {
	throw DataException("Error - cannot compute inf for constant lazy data.");
  }
  return infWorker();
}

// Minimum value over all components; resolves lazy data.
double
Data::inf()
{
  if (isLazy())
  {
	resolve();
  }
  return infWorker();
}
1451
// Local Lsup reduction; under MPI the per-rank results are combined
// with MPI_MAX so every rank returns the global value.
double
Data::LsupWorker() const
{
  double localValue;
  //
  // set the initial absolute maximum value to zero
  AbsMax abs_max_func;
  localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

// Local sup reduction; combined with MPI_MAX across ranks under MPI.
double
Data::supWorker() const
{
  double localValue;
  //
  // set the initial maximum value to min possible double
  // (max()*-1 is the pre-C++11 spelling of numeric_limits::lowest())
  FMax fmax_func;
  localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

// Local inf reduction; combined with MPI_MIN across ranks under MPI.
double
Data::infWorker() const
{
  double localValue;
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1503
1504 /* TODO */
1505 /* global reduction */
// Per-data-point maximum: reduces each data point to a scalar via
// dp_algorithm.  Lazy data is resolved through a temporary copy
// because resolve() cannot be called on a const object.
// (No cross-rank reduction here — see the TODO above.)
Data
Data::maxval() const
{
  if (isLazy())
  {
	Data temp(*this); // to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.maxval();
  }
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
}

// Per-data-point minimum; see maxval for the lazy-data handling.
Data
Data::minval() const
{
  if (isLazy())
  {
	Data temp(*this); // to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.minval();
  }
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  return dp_algorithm(fmin_func,numeric_limits<double>::max());
}
1535
1536 Data
1537 Data::swapaxes(const int axis0, const int axis1) const
1538 {
1539 if (isLazy())
1540 {
1541 Data temp(*this);
1542 temp.resolve();
1543 return temp.swapaxes(axis0,axis1);
1544 }
1545 int axis0_tmp,axis1_tmp;
1546 DataTypes::ShapeType s=getDataPointShape();
1547 DataTypes::ShapeType ev_shape;
1548 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1549 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1550 int rank=getDataPointRank();
1551 if (rank<2) {
1552 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1553 }
1554 if (axis0<0 || axis0>rank-1) {
1555 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1556 }
1557 if (axis1<0 || axis1>rank-1) {
1558 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1559 }
1560 if (axis0 == axis1) {
1561 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1562 }
1563 if (axis0 > axis1) {
1564 axis0_tmp=axis1;
1565 axis1_tmp=axis0;
1566 } else {
1567 axis0_tmp=axis0;
1568 axis1_tmp=axis1;
1569 }
1570 for (int i=0; i<rank; i++) {
1571 if (i == axis0_tmp) {
1572 ev_shape.push_back(s[axis1_tmp]);
1573 } else if (i == axis1_tmp) {
1574 ev_shape.push_back(s[axis0_tmp]);
1575 } else {
1576 ev_shape.push_back(s[i]);
1577 }
1578 }
1579 Data ev(0.,ev_shape,getFunctionSpace());
1580 ev.typeMatchRight(*this);
1581 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1582 return ev;
1583
1584 }
1585
// Return the symmetric part of this Data: defined for rank 2 objects
// with dim0==dim1 and rank 4 objects with dim0==dim2 and dim1==dim3.
// Shape validation happens before the lazy check so invalid shapes are
// reported eagerly even for lazy data.
Data
Data::symmetric() const
{
     // check input
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()==2) {
        if(s[0] != s[1])
           throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
     }
     else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
           throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
     }
     else {
        throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
     }
     if (isLazy())
     {
	DataLazy* c=new DataLazy(borrowDataPtr(),SYM);
	return Data(c);
     }
     Data ev(0.,getDataPointShape(),getFunctionSpace());
     ev.typeMatchRight(*this);
     m_data->symmetric(ev.m_data.get());
     return ev;
}
1612
// Return the antisymmetric (nonsymmetric) part of this Data.
// Same rank/shape constraints as symmetric().
// NOTE(review): unlike symmetric(), the lazy branch is taken before
// shape validation, so a lazy object with an invalid shape is not
// rejected here — confirm whether DataLazy validates later.
Data
Data::nonsymmetric() const
{
     if (isLazy())
     {
	DataLazy* c=new DataLazy(borrowDataPtr(),NSYM);
	return Data(c);
     }
     // check input
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()==2) {
        if(s[0] != s[1])
           throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
     }
     else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
           throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        ev_shape.push_back(s[2]);
        ev_shape.push_back(s[3]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
     }
     else {
        throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
     }
}
1651
// Return the trace of this Data, contracting over the axis pair
// selected by axis_offset.  Supported ranks: 2 (result is scalar),
// 3 and 4 (result has the uncontracted dimensions).
// NOTE(review): axis_offset is not range-checked here.  For rank 3,
// values other than 0 or 1 (and for rank 4, other than 0..2) silently
// leave ev_shape empty/short — presumably the python wrapper validates;
// confirm before relying on this from C++.
Data
Data::trace(int axis_offset) const
{
     if (isLazy())
     {
	DataLazy* c=new DataLazy(borrowDataPtr(),TRACE,axis_offset);
	return Data(c);
     }
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()==2) {
        DataTypes::ShapeType ev_shape;
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
     }
     if (getDataPointRank()==3) {
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
          int s2=s[2];
          ev_shape.push_back(s2);
        }
        else if (axis_offset==1) {
          int s0=s[0];
          ev_shape.push_back(s0);
        }
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
     }
     if (getDataPointRank()==4) {
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
          ev_shape.push_back(s[2]);
          ev_shape.push_back(s[3]);
        }
        else if (axis_offset==1) {
          ev_shape.push_back(s[0]);
          ev_shape.push_back(s[3]);
        }
        else if (axis_offset==2) {
          ev_shape.push_back(s[0]);
          ev_shape.push_back(s[1]);
        }
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
     }
     else {
        throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
     }
}
1706
1707 Data
1708 Data::transpose(int axis_offset) const
1709 {
1710 if (isLazy())
1711 {
1712 DataLazy* c=new DataLazy(borrowDataPtr(),TRANS,axis_offset);
1713 return Data(c);
1714 }
1715 DataTypes::ShapeType s=getDataPointShape();
1716 DataTypes::ShapeType ev_shape;
1717 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1718 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1719 int rank=getDataPointRank();
1720 if (axis_offset<0 || axis_offset>rank) {
1721 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1722 }
1723 for (int i=0; i<rank; i++) {
1724 int index = (axis_offset+i)%rank;
1725 ev_shape.push_back(s[index]); // Append to new shape
1726 }
1727 Data ev(0.,ev_shape,getFunctionSpace());
1728 ev.typeMatchRight(*this);
1729 m_data->transpose(ev.m_data.get(), axis_offset);
1730 return ev;
1731 }
1732
// Return the eigenvalues of each (square, rank 2) data point as a
// rank 1 Data object of length dim0.  Lazy data is resolved through a
// temporary copy because resolve() is non-const.
Data
Data::eigenvalues() const
{
     if (isLazy())
     {
	Data temp(*this); // to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.eigenvalues();
     }
     // check input
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
     if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
     // create return
     DataTypes::ShapeType ev_shape(1,s[0]);
     Data ev(0.,ev_shape,getFunctionSpace());
     ev.typeMatchRight(*this);
     m_data->eigenvalues(ev.m_data.get());
     return ev;
}

// Return (eigenvalues, eigenvectors) of each square rank 2 data point
// as a python tuple of two Data objects: eigenvalues with shape (dim0,)
// and eigenvectors with shape (dim0, dim0).  tol controls the
// underlying iterative solver's tolerance.
const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
     if (isLazy())
     {
	Data temp(*this); // to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.eigenvalues_and_eigenvectors(tol);
     }
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
     if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
     // create return
     DataTypes::ShapeType ev_shape(1,s[0]);
     Data ev(0.,ev_shape,getFunctionSpace());
     ev.typeMatchRight(*this);
     DataTypes::ShapeType V_shape(2,s[0]);
     Data V(0.,V_shape,getFunctionSpace());
     V.typeMatchRight(*this);
     m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
     return make_tuple(boost::python::object(ev),boost::python::object(V));
}
1780
// Python-facing wrapper: returns (ProcNo, DataPointNo) locating the
// global minimum of minval() as a python tuple.
const boost::python::tuple
Data::minGlobalDataPoint() const
{
  // NB: calc_minGlobalDataPoint( had to be split off from minGlobalDataPoint( as boost::make_tuple causes an
  // abort (for unknown reasons) if there are openmp directives with it in the
  // surrounding function

  int DataPointNo;
  int ProcNo;
  calc_minGlobalDataPoint(ProcNo,DataPointNo);
  return make_tuple(ProcNo,DataPointNo);
}
1793
1794 void
1795 Data::calc_minGlobalDataPoint(int& ProcNo,
1796 int& DataPointNo) const
1797 {
1798 if (isLazy())
1799 {
1800 Data temp(*this); // to get around the fact that you can't resolve a const Data
1801 temp.resolve();
1802 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1803 }
1804 int i,j;
1805 int lowi=0,lowj=0;
1806 double min=numeric_limits<double>::max();
1807
1808 Data temp=minval();
1809
1810 int numSamples=temp.getNumSamples();
1811 int numDPPSample=temp.getNumDataPointsPerSample();
1812
1813 double next,local_min;
1814 int local_lowi=0,local_lowj=0;
1815
1816 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1817 {
1818 local_min=min;
1819 #pragma omp for private(i,j) schedule(static)
1820 for (i=0; i<numSamples; i++) {
1821 for (j=0; j<numDPPSample; j++) {
1822 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1823 if (next<local_min) {
1824 local_min=next;
1825 local_lowi=i;
1826 local_lowj=j;
1827 }
1828 }
1829 }
1830 #pragma omp critical
1831 if (local_min<min) {
1832 min=local_min;
1833 lowi=local_lowi;
1834 lowj=local_lowj;
1835 }
1836 }
1837
1838 #ifdef PASO_MPI
1839 // determine the processor on which the minimum occurs
1840 next = temp.getDataPoint(lowi,lowj);
1841 int lowProc = 0;
1842 double *globalMins = new double[get_MPISize()+1];
1843 int error;
1844 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1845
1846 if( get_MPIRank()==0 ){
1847 next = globalMins[lowProc];
1848 for( i=1; i<get_MPISize(); i++ )
1849 if( next>globalMins[i] ){
1850 lowProc = i;
1851 next = globalMins[i];
1852 }
1853 }
1854 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1855
1856 delete [] globalMins;
1857 ProcNo = lowProc;
1858 #else
1859 ProcNo = 0;
1860 #endif
1861 DataPointNo = lowj + lowi * numDPPSample;
1862 }
1863
// Write this Data to an OpenDX file via the domain's saveDX.
// Empty data is rejected; lazy data is resolved through a temporary
// copy (resolve() is non-const).
void
Data::saveDX(std::string fileName) const
{
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
     Data temp(*this);	// to get around the fact that you can't resolve a const Data
     temp.resolve();
     temp.saveDX(fileName);
     return;
  }
  // pass this object to the domain under the key "data"
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveDX(fileName,args);
  return;
}

// Write this Data to a VTK file via the domain's saveVTK.
// Same empty/lazy handling as saveDX.
void
Data::saveVTK(std::string fileName) const
{
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
     Data temp(*this);	// to get around the fact that you can't resolve a const Data
     temp.resolve();
     temp.saveVTK(fileName);
     return;
  }
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveVTK(fileName,args);
  return;
}
1903
// In-place addition.  If either operand is lazy the operation is
// recorded as a DataLazy ADD node instead of being evaluated (for
// lazy data + is equivalent to +=).  Otherwise exclusiveWrite() makes
// a private copy if the underlying data is shared (copy-on-write)
// before mutating in place.
Data&
Data::operator+=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  if (isLazy() || right.isLazy())
  {
	DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),ADD);	// for lazy + is equivalent to +=
	m_data=c->getPtr();
	return (*this);
  }
  else
  {
	exclusiveWrite();			// Since Lazy data does not modify its leaves we only need to worry here
	binaryOp(right,plus<double>());
	return (*this);
  }
}

// In-place addition of a python object: wrapped in a Data with this
// object's FunctionSpace and delegated to the Data overload above.
Data&
Data::operator+=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)+=tmp;
  return *this;
}
1934
// Assignment.  By default (ASSIGNMENT_MEANS_DEEPCOPY undefined) this
// SHARES the underlying data with `other`; any subsequent in-place
// modification triggers a copy via exclusiveWrite() (copy-on-write).
// Defining ASSIGNMENT_MEANS_DEEPCOPY restores the old deep-copy
// behaviour during the transition period.
Data&
Data::operator=(const Data& other)
{
#if defined ASSIGNMENT_MEANS_DEEPCOPY
// This should not be used.
// Just leaving this here until I have completed transition
  copy(other);
#else
  m_protected=false;		// since any changes should be caught by exclusiveWrite();
  m_data=other.m_data;
#endif
  return (*this);
}
1949
// In-place subtraction; same lazy/copy-on-write pattern as operator+=.
Data&
Data::operator-=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  if (isLazy() || right.isLazy())
  {
	DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),SUB);	// for lazy - is equivalent to -=
	m_data=c->getPtr();
	return (*this);
  }
  else
  {
	exclusiveWrite();
	binaryOp(right,minus<double>());
	return (*this);
  }
}

// In-place subtraction of a python object (delegates to Data overload).
Data&
Data::operator-=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)-=tmp;
  return (*this);
}

// In-place multiplication; same lazy/copy-on-write pattern.
Data&
Data::operator*=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  if (isLazy() || right.isLazy())
  {
	DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),MUL);	// for lazy * is equivalent to *=
	m_data=c->getPtr();
	return (*this);
  }
  else
  {
	exclusiveWrite();
	binaryOp(right,multiplies<double>());
	return (*this);
  }
}

// In-place multiplication by a python object (delegates to Data overload).
Data&
Data::operator*=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)*=tmp;
  return (*this);
}

// In-place division; same lazy/copy-on-write pattern.
Data&
Data::operator/=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  if (isLazy() || right.isLazy())
  {
	DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),DIV);	// for lazy / is equivalent to /=
	m_data=c->getPtr();
	return (*this);
  }
  else
  {
	exclusiveWrite();
	binaryOp(right,divides<double>());
	return (*this);
  }
}

// In-place division by a python object (delegates to Data overload).
Data&
Data::operator/=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)/=tmp;
  return (*this);
}
2042
// Reflected power for python (left ** this): wraps the python object
// in a Data interpolated against *this, then delegates to powD.
Data
Data::rpowO(const boost::python::object& left) const
{
  Data left_d(left,*this);
  return left_d.powD(*this);
}

// Power with a python exponent (this ** right).
Data
Data::powO(const boost::python::object& right) const
{
  Data tmp(right,getFunctionSpace(),false);
  return powD(tmp);
}

// Element-wise power (this ** right).  Lazy operands produce a
// deferred POW node; otherwise ::pow is applied immediately.
Data
Data::powD(const Data& right) const
{
  if (isLazy() || right.isLazy())
  {
	DataLazy* c=new DataLazy(m_data,right.borrowDataPtr(),POW);
	return Data(c);
  }
  return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
}
2067
2068 //
2069 // NOTE: It is essential to specify the namespace this operator belongs to
2070 Data
2071 escript::operator+(const Data& left, const Data& right)
2072 {
2073 if (left.isLazy() || right.isLazy())
2074 {
2075 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),ADD);
2076 return Data(c);
2077 }
2078 return C_TensorBinaryOperation(left, right, plus<double>());
2079 }
2080
2081 //
2082 // NOTE: It is essential to specify the namespace this operator belongs to
2083 Data
2084 escript::operator-(const Data& left, const Data& right)
2085 {
2086 if (left.isLazy() || right.isLazy())
2087 {
2088 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),SUB);
2089 return Data(c);
2090 }
2091 return C_TensorBinaryOperation(left, right, minus<double>());
2092 }
2093
2094 //
2095 // NOTE: It is essential to specify the namespace this operator belongs to
2096 Data
2097 escript::operator*(const Data& left, const Data& right)
2098 {
2099 if (left.isLazy() || right.isLazy())
2100 {
2101 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),MUL);
2102 return Data(c);
2103 }
2104 return C_TensorBinaryOperation(left, right, multiplies<double>());
2105 }
2106
2107 //
2108 // NOTE: It is essential to specify the namespace this operator belongs to
2109 Data
2110 escript::operator/(const Data& left, const Data& right)
2111 {
2112 if (left.isLazy() || right.isLazy())
2113 {
2114 DataLazy* c=new DataLazy(left.borrowDataPtr(),right.borrowDataPtr(),DIV);
2115 return Data(c);
2116 }
2117 return C_TensorBinaryOperation(left, right, divides<double>());
2118 }
2119
2120 //
2121 // NOTE: It is essential to specify the namespace this operator belongs to
2122 Data
2123 escript::operator+(const Data& left, const boost::python::object& right)
2124 {
2125 if (left.isLazy())
2126 {
2127 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),ADD);
2128 return Data(c);
2129 }
2130 return left+Data(right,left.getFunctionSpace(),false);
2131 }
2132
2133 //
2134 // NOTE: It is essential to specify the namespace this operator belongs to
2135 Data
2136 escript::operator-(const Data& left, const boost::python::object& right)
2137 {
2138 if (left.isLazy())
2139 {
2140 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),SUB);
2141 return Data(c);
2142 }
2143 return left-Data(right,left.getFunctionSpace(),false);
2144 }
2145
2146 //
2147 // NOTE: It is essential to specify the namespace this operator belongs to
2148 Data
2149 escript::operator*(const Data& left, const boost::python::object& right)
2150 {
2151 if (left.isLazy())
2152 {
2153 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),MUL);
2154 return Data(c);
2155 }
2156 return left*Data(right,left.getFunctionSpace(),false);
2157 }
2158
2159 //
2160 // NOTE: It is essential to specify the namespace this operator belongs to
2161 Data
2162 escript::operator/(const Data& left, const boost::python::object& right)
2163 {
2164 if (left.isLazy())
2165 {
2166 DataLazy* c=new DataLazy(left.borrowDataPtr(),Data(right,left.getFunctionSpace(),false).borrowDataPtr(),DIV);
2167 return Data(c);
2168 }
2169 return left/Data(right,left.getFunctionSpace(),false);
2170 }
2171
2172 //
2173 // NOTE: It is essential to specify the namespace this operator belongs to
2174 Data
2175 escript::operator+(const boost::python::object& left, const Data& right)
2176 {
2177 if (right.isLazy())
2178 {
2179 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),ADD);
2180 return Data(c);
2181 }
2182 return Data(left,right.getFunctionSpace(),false)+right;
2183 }
2184
2185 //
2186 // NOTE: It is essential to specify the namespace this operator belongs to
2187 Data
2188 escript::operator-(const boost::python::object& left, const Data& right)
2189 {
2190 if (right.isLazy())
2191 {
2192 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),SUB);
2193 return Data(c);
2194 }
2195 return Data(left,right.getFunctionSpace(),false)-right;
2196 }
2197
2198 //
2199 // NOTE: It is essential to specify the namespace this operator belongs to
2200 Data
2201 escript::operator*(const boost::python::object& left, const Data& right)
2202 {
2203 if (right.isLazy())
2204 {
2205 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),MUL);
2206 return Data(c);
2207 }
2208 return Data(left,right.getFunctionSpace(),false)*right;
2209 }
2210
2211 //
2212 // NOTE: It is essential to specify the namespace this operator belongs to
2213 Data
2214 escript::operator/(const boost::python::object& left, const Data& right)
2215 {
2216 if (right.isLazy())
2217 {
2218 DataLazy* c=new DataLazy(Data(left,right.getFunctionSpace(),false).borrowDataPtr(),right.borrowDataPtr(),DIV);
2219 return Data(c);
2220 }
2221 return Data(left,right.getFunctionSpace(),false)/right;
2222 }
2223
2224
/* TODO */
/* global reduction */
// Python __getitem__: converts the python slice/index key into a
// region over the data point shape and returns the sliced Data.
// Throws DataException if the key's rank does not match this Data's.
Data
Data::getItem(const boost::python::object& key) const
{

  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);

  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }

  return getSlice(slice_region);
}

/* TODO */
/* global reduction */
// Return a new Data constructed as a slice of this one over `region`.
Data
Data::getSlice(const DataTypes::RegionType& region) const
{
  return Data(*this,region);
}
2247
/* TODO */
/* global reduction */
// Python __setitem__ with a python value: wraps the value in a Data
// on this object's FunctionSpace and delegates to setItemD.
void
Data::setItemO(const boost::python::object& key,
               const boost::python::object& value)
{
  Data tempData(value,getFunctionSpace());
  setItemD(key,tempData);
}

// Python __setitem__ with a Data value: validates the slice region,
// ensures this object has a private (unshared) copy of its data, and
// assigns `value` into the region (interpolating to this object's
// FunctionSpace first if it differs).
void
Data::setItemD(const boost::python::object& key,
               const Data& value)
{
  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }
  exclusiveWrite();
  if (getFunctionSpace()!=value.getFunctionSpace()) {
     setSlice(Data(value,getFunctionSpace()),slice_region);
  } else {
     setSlice(value,slice_region);
  }
}
2273
// Assign `value` into `region` of this Data in place.
// Protected objects are rejected; lazy data is forced to resolve
// (FORCERESOLVE macro), and exclusiveWrite() guards against mutating
// shared storage for callers that bypass setItemD.  The value is
// type-matched in both directions so both sides have compatible
// underlying representations (constant/tagged/expanded).
void
Data::setSlice(const Data& value,
               const DataTypes::RegionType& region)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  FORCERESOLVE;
  exclusiveWrite();		// In case someone finds a way to call this without going through setItemD
  Data tempValue(value);
  typeMatchLeft(tempValue);
  typeMatchRight(tempValue);
  getReady()->setSlice(tempValue.m_data.get(),region);
}
2292
// Promote `right`'s representation so it is at least as general as
// this object's (constant < tagged < expanded); lazy right operands
// are resolved first when this object is not lazy.
void
Data::typeMatchLeft(Data& right) const
{
  if (right.isLazy() && !isLazy())
  {
    right.resolve();
  }
  if (isExpanded()){
    right.expand();
  } else if (isTagged()) {
    if (right.isConstant()) {
      right.tag();
    }
  }
}

// Promote THIS object's representation so it is at least as general
// as `right`'s; the mirror of typeMatchLeft.
void
Data::typeMatchRight(const Data& right)
{
  if (isLazy() && !right.isLazy())
  {
    resolve();
  }
  if (isTagged()) {
    if (right.isExpanded()) {
      expand();
    }
  } else if (isConstant()) {
    if (right.isExpanded()) {
      expand();
    } else if (right.isTagged()) {
      tag();
    }
  }
}
2328
2329 void
2330 Data::setTaggedValueByName(std::string name,
2331 const boost::python::object& value)
2332 {
2333 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2334 FORCERESOLVE;
2335 exclusiveWrite();
2336 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2337 setTaggedValue(tagKey,value);
2338 }
2339 }
2340
2341 void
2342 Data::setTaggedValue(int tagKey,
2343 const boost::python::object& value)
2344 {
2345 if (isProtected()) {
2346 throw DataException("Error - attempt to update protected Data object.");
2347 }
2348 //
2349 // Ensure underlying data object is of type DataTagged
2350 FORCERESOLVE;
2351 exclusiveWrite();
2352 if (isConstant()) tag();
2353 WrappedArray w(value);
2354
2355 DataVector temp_data2;
2356 temp_data2.copyFromArray(w);
2357
2358 m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
2359 }
2360
2361
2362 void
2363 Data::setTaggedValueFromCPP(int tagKey,
2364 const DataTypes::ShapeType& pointshape,
2365 const DataTypes::ValueType& value,
2366 int dataOffset)
2367 {
2368 if (isProtected()) {
2369 throw DataException("Error - attempt to update protected Data object.");
2370 }
2371 //
2372 // Ensure underlying data object is of type DataTagged
2373 FORCERESOLVE;
2374 if (isConstant()) tag();
2375 exclusiveWrite();
2376 //
2377 // Call DataAbstract::setTaggedValue
2378 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2379 }
2380
2381 int
2382 Data::getTagNumber(int dpno)
2383 {
2384 if (isEmpty())
2385 {
2386 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2387 }
2388 return getFunctionSpace().getTagFromDataPointNo(dpno);
2389 }
2390
2391
2392 ostream& escript::operator<<(ostream& o, const Data& data)
2393 {
2394 o << data.toString();
2395 return o;
2396 }
2397
// General tensor product of two Data objects:
//   res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
// where SM is the product of the last `axis_offset` entries of arg_0's shape.
// `transpose` selects which argument is (logically) transposed: 0 = neither,
// 1 = arg_0, 2 = arg_1.
// The implementation dispatches on the storage type (constant/tagged/expanded)
// of each argument; lazy inputs are deferred by returning a lazy PROD node.
Data
escript::C_GeneralTensorProduct(Data& arg_0,
                     Data& arg_1,
                     int axis_offset,
                     int transpose)
{
  // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
  // SM is the product of the last axis_offset entries in arg_0.getShape().

  // deal with any lazy data
//   if (arg_0.isLazy()) {arg_0.resolve();}
//   if (arg_1.isLazy()) {arg_1.resolve();}
  if (arg_0.isLazy() || arg_1.isLazy())
  {
     // Defer evaluation: build a lazy product node over the two operands.
     DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
     return Data(c);
  }

  // Interpolate if necessary and find an appropriate function space
  Data arg_0_Z, arg_1_Z;
  if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
    if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
      arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
      arg_1_Z = Data(arg_1);
    }
    else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
      arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
      arg_0_Z =Data(arg_0);
    }
    else {
      throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
    }
  } else {
      arg_0_Z = Data(arg_0);
      arg_1_Z = Data(arg_1);
  }
  // From here on arg_0_Z and arg_1_Z share a function space and are resolved.

  // Get rank and shape of inputs
  int rank0 = arg_0_Z.getDataPointRank();
  int rank1 = arg_1_Z.getDataPointRank();
  const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
  const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();

  // Prepare for the loops of the product and verify compatibility of shapes
  // start0/start1 are rotation offsets implementing the requested transpose.
  int start0=0, start1=0;
  if (transpose == 0)		{}
  else if (transpose == 1)	{ start0 = axis_offset; }
  else if (transpose == 2)	{ start1 = rank1-axis_offset; }
  else			{ throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }


  // Adjust the shapes for transpose
  DataTypes::ShapeType tmpShape0(rank0);	// pre-sizing the vectors rather
  DataTypes::ShapeType tmpShape1(rank1);	// than using push_back
  for (int i=0; i<rank0; i++)	{ tmpShape0[i]=shape0[(i+start0)%rank0]; }
  for (int i=0; i<rank1; i++)	{ tmpShape1[i]=shape1[(i+start1)%rank1]; }

#if 0
  // For debugging: show shape after transpose
  char tmp[100];
  std::string shapeStr;
  shapeStr = "(";
  for (int i=0; i<rank0; i++)	{ sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
  shapeStr = "(";
  for (int i=0; i<rank1; i++)	{ sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
#endif

  // Prepare for the loops of the product
  // SL = product of the leading (un-summed) axes of arg_0,
  // SM = product of the summed axes, SR = trailing axes of arg_1.
  int SL=1, SM=1, SR=1;
  for (int i=0; i<rank0-axis_offset; i++)	{
    SL *= tmpShape0[i];
  }
  for (int i=rank0-axis_offset; i<rank0; i++)	{
    // The summed axes must agree between the two operands.
    if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
      throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
    }
    SM *= tmpShape0[i];
  }
  for (int i=axis_offset; i<rank1; i++)		{
    SR *= tmpShape1[i];
  }

  // Define the shape of the output (rank of shape is the sum of the loop ranges below)
  DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
  {			// block to limit the scope of out_index
     int out_index=0;
     for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
     for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
  }

  if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
  {
     ostringstream os;
     os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
     throw DataException(os.str());
  }

  // Declare output Data object
  Data res;

  // Dispatch on the storage-type combination of the two (now type-matched) inputs.
  if      (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());	// DataConstant output
    double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
    double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
    double *ptr_2 = &(res.getDataAtOffset(0));
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
  }
  else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {

    // Prepare the DataConstant input
    DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());	// DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_0 = tmp_0->getPointOffset(0,0);
    double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
    // Get the views
//     DataArrayView view_1 = tmp_1->getDefaultValue();
//     DataArrayView view_2 = tmp_2->getDefaultValue();
//     // Get the pointers to the actual data
//     double *ptr_1 = &((view_1.getData())[0]);
//     double *ptr_2 = &((view_2.getData())[0]);

    const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValue(0));


    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2->addTag(i->first);
//       DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
//       DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
//       double *ptr_1 = &view_1.getData(0);
//       double *ptr_2 = &view_2.getData(0);

      const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_1,dataPointNo_1;
    int numSamples_1 = arg_1_Z.getNumSamples();
    int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
    int offset_0 = tmp_0->getPointOffset(0,0);
    // One product per data point of the expanded argument; samples are independent.
    #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
    for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
      for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
        int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
        int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }

    // Prepare the DataConstant input
    DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_0_Z.getFunctionSpace());	// DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_1 = tmp_1->getPointOffset(0,0);
    double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
    // Get the views
//     DataArrayView view_0 = tmp_0->getDefaultValue();
//     DataArrayView view_2 = tmp_2->getDefaultValue();
//     // Get the pointers to the actual data
//     double *ptr_0 = &((view_0.getData())[0]);
//     double *ptr_2 = &((view_2.getData())[0]);

    const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValue(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {
//      tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
//       DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
//       DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
//       double *ptr_0 = &view_0.getData(0);
//       double *ptr_2 = &view_2.getData(0);

      tmp_2->addTag(i->first);
      const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
    res.tag();	// DataTagged output
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
    const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValue(0));


    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Merge the tags: the output carries the union of both inputs' tag sets.
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
    const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {
      tmp_2->addTag(i->first); // use tmp_2 to get correct shape
    }
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2->addTag(i->first);
    }
    // Compute an MVP for each tag
    // (inputs fall back to their default value for tags they don't carry)
    const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
    for (i=lookup_2.begin();i!=lookup_2.end();i++) {
      const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
      const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      // Tagged data has one value per sample, so fetch the left operand once.
      int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
      double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    int offset_1 = tmp_1->getPointOffset(0,0);
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }


  }
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      // Tagged right operand: one value per sample.
      int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
      double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else {
    throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
  }

  return res;
}
2789
2790 DataAbstract*
2791 Data::borrowData() const
2792 {
2793 return m_data.get();
2794 }
2795
2796 // Not all that happy about returning a non-const from a const
2797 DataAbstract_ptr
2798 Data::borrowDataPtr() const
2799 {
2800 return m_data;
2801 }
2802
2803 // Not all that happy about returning a non-const from a const
2804 DataReady_ptr
2805 Data::borrowReadyPtr() const
2806 {
2807 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2808 EsysAssert((dr!=0), "Error - casting to DataReady.");
2809 return dr;
2810 }
2811
2812 std::string
2813 Data::toString() const
2814 {
2815 if (!m_data->isEmpty() &&
2816 !m_data->isLazy() &&
2817 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2818 {
2819 stringstream temp;
2820 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2821 return temp.str();
2822 }
2823 return m_data->toString();
2824 }
2825
2826
2827
2828 DataTypes::ValueType::const_reference
2829 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2830 {
2831 if (isLazy())
2832 {
2833 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2834 }
2835 return getReady()->getDataAtOffset(i);
2836 }
2837
2838
2839 DataTypes::ValueType::reference
2840 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2841 {
2842 FORCERESOLVE;
2843 exclusiveWrite();
2844 return getReady()->getDataAtOffset(i);
2845 }
2846
2847 DataTypes::ValueType::const_reference
2848 Data::getDataPoint(int sampleNo, int dataPointNo) const
2849 {
2850 if (!isReady())
2851 {
2852 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2853 }
2854 else
2855 {
2856 const DataReady* dr=getReady();
2857 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2858 }
2859 }
2860
2861
2862 DataTypes::ValueType::reference
2863 Data::getDataPoint(int sampleNo, int dataPointNo)
2864 {
2865 FORCERESOLVE;
2866 if (!isReady())
2867 {
2868 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2869 }
2870 else
2871 {
2872 exclusiveWrite();
2873 DataReady* dr=getReady();
2874 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2875 }
2876 }
2877
2878 DataTypes::ValueType*
2879 Data::allocSampleBuffer() const
2880 {
2881 if (isLazy())
2882 {
2883 return new DataTypes::ValueType(getSampleBufferSize());
2884 }
2885 else
2886 {
2887 return NULL;
2888 }
2889 }
2890
2891 void
2892 Data::freeSampleBuffer(DataTypes::ValueType* buffer)
2893 {
2894 if (buffer!=0)
2895 {
2896 delete buffer;
2897 }
2898 }
2899
2900
2901 /* Member functions specific to the MPI implementation */
2902
2903 void
2904 Data::print()
2905 {
2906 int i,j;
2907
2908 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2909 for( i=0; i<getNumSamples(); i++ )
2910 {
2911 printf( "[%6d]", i );
2912 for( j=0; j<getNumDataPointsPerSample(); j++ )
2913 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2914 printf( "\n" );
2915 }
2916 }
2917 void
2918 Data::dump(const std::string fileName) const
2919 {
2920 try
2921 {
2922 if (isLazy())
2923 {
2924 Data temp(*this); // this is to get a non-const object which we can resolve
2925 temp.resolve();
2926 temp.dump(fileName);
2927 }
2928 else
2929 {
2930 return m_data->dump(fileName);
2931 }
2932 }
2933 catch (exception& e)
2934 {
2935 cout << e.what() << endl;
2936 }
2937 }
2938
2939 int
2940 Data::get_MPISize() const
2941 {
2942 int size;
2943 #ifdef PASO_MPI
2944 int error;
2945 error = MPI_Comm_size( get_MPIComm(), &size );
2946 #else
2947 size = 1;
2948 #endif
2949 return size;
2950 }
2951
2952 int
2953 Data::get_MPIRank() const
2954 {
2955 int rank;
2956 #ifdef PASO_MPI
2957 int error;
2958 error = MPI_Comm_rank( get_MPIComm(), &rank );
2959 #else
2960 rank = 0;
2961 #endif
2962 return rank;
2963 }
2964
// Communicator used by this object's domain.
// Returns -1 in a non-MPI build (MPI_Comm is presumably an integer typedef
// in that configuration -- TODO confirm against the non-MPI stub header).
MPI_Comm
Data::get_MPIComm() const
{
#ifdef PASO_MPI
	return MPI_COMM_WORLD;
#else
	return -1;
#endif
}
2974
2975

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26