/[escript]/branches/arrexp_2137_win_merge/escript/src/Data.cpp
ViewVC logotype

Contents of /branches/arrexp_2137_win_merge/escript/src/Data.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2222 - (show annotations)
Tue Jan 20 04:52:39 2009 UTC (11 years, 1 month ago) by jfenwick
File size: 78710 byte(s)
Saving work
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41 #include "WrappedArray.h"
42
43 using namespace std;
44 using namespace boost::python;
45 using namespace boost;
46 using namespace escript;
47
// ensure the current object is not a DataLazy
// The idea was that we could add an optional warning whenever a resolve is forced
#define FORCERESOLVE if (isLazy()) {resolve();}

// True when automatic lazy evaluation has been enabled globally.
#define AUTOLAZYON escriptParams.getAUTOLAZY()

// If this object is lazy (or auto-lazy is on and the data is expanded),
// return a new lazy node applying unary operation X instead of computing now.
#define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
	DataLazy* c=new DataLazy(borrowDataPtr(),X);\
	return Data(c);\
}

// As MAKELAZYOP, but for operations taking an extra argument Y (e.g. a tolerance).
#define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
	DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
	return Data(c);\
}

// Binary operation applied in place: *this becomes a lazy node combining
// *this and R with operation X, and *this is returned.
#define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
	/* m_data=c->getPtr();*/	set_m_data(c->getPtr());\
	return (*this);\
}

// like the above but returns a new data rather than *this
#define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
	return Data(c);\
}

// Binary form for two explicit operands L and R (used where *this is not one of them).
#define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
	return Data(c);\
}

// Do not use the following unless you want to make copies on assignment rather than
// share data. CopyOnWrite should make this unnecessary.
// #define ASSIGNMENT_MEANS_DEEPCOPY
86
namespace
{

// Convert one data point into a (possibly nested) python tuple.
// 'shape' gives the point's dimensions (rank 0..4); 'v' is any
// double-indexable sequence (raw pointer or vector) holding the values,
// addressed via DataTypes::getRelIndex.
// Throws DataException for ranks above 4.
// This should be safer once the DataC RO changes have been brought in
template <class ARR>
boost::python::tuple
pointToTuple( const DataTypes::ShapeType& shape,ARR v)
{
   using namespace boost::python;
   using boost::python::list;	// disambiguate from std::list
   int rank=shape.size();
   if (rank==0)
   {
	// scalar: a 1-tuple holding the single value
	return make_tuple(v[0]);
   }
   else if (rank==1)
   {
	list l;
	for (size_t i=0;i<shape[0];++i)
	{
	   l.append(v[i]);
	}
	return tuple(l);
   }
   else if (rank==2)
   {
	// tuple of tuples: outer index j, inner index i
	list lj;
	for (size_t j=0;j<shape[0];++j)
	{
	   list li;
	   for (size_t i=0;i<shape[1];++i)
	   {
	      li.append(v[DataTypes::getRelIndex(shape,j,i)]);
	   }
	   lj.append(tuple(li));
	}
	return tuple(lj);
   }
   else if (rank==3)
   {
	list lk;
	for (size_t k=0;k<shape[0];++k)
	{
	   list lj;
	   for (size_t j=0;j<shape[1];++j)
	   {
	      list li;
	      for (size_t i=0;i<shape[2];++i)
	      {
		 li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
	      }
	      lj.append(tuple(li));
	   }
	   lk.append(tuple(lj));
	}
	return tuple(lk);
   }
   else if (rank==4)
   {
	list ll;
	for (size_t l=0;l<shape[0];++l)
	{
	   list lk;
	   for (size_t k=0;k<shape[1];++k)
	   {
	      list lj;
	      for (size_t j=0;j<shape[2];++j)
	      {
		 list li;
		 for (size_t i=0;i<shape[3];++i)
		 {
		    li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
		 }
		 lj.append(tuple(li));
	      }
	      lk.append(tuple(lj));
	   }
	   ll.append(tuple(lk));
	}
	return tuple(ll);
   }
   else
     throw DataException("Unknown rank in pointToTuple.");
}

}	// anonymous namespace
173
174 Data::Data()
175 : m_shared(false), m_lazy(false)
176 {
177 //
178 // Default data is type DataEmpty
179 DataAbstract* temp=new DataEmpty();
180 // m_data=temp->getPtr();
181 set_m_data(temp->getPtr());
182 m_protected=false;
183 }
184
185 Data::Data(double value,
186 const tuple& shape,
187 const FunctionSpace& what,
188 bool expanded)
189 : m_shared(false), m_lazy(false)
190 {
191 DataTypes::ShapeType dataPointShape;
192 for (int i = 0; i < shape.attr("__len__")(); ++i) {
193 dataPointShape.push_back(extract<const int>(shape[i]));
194 }
195
196 int len = DataTypes::noValues(dataPointShape);
197 DataVector temp_data(len,value,len);
198 initialise(temp_data, dataPointShape, what, expanded);
199 m_protected=false;
200 }
201
202 Data::Data(double value,
203 const DataTypes::ShapeType& dataPointShape,
204 const FunctionSpace& what,
205 bool expanded)
206 : m_shared(false), m_lazy(false)
207 {
208 int len = DataTypes::noValues(dataPointShape);
209
210 DataVector temp_data(len,value,len);
211 // DataArrayView temp_dataView(temp_data, dataPointShape);
212
213 // initialise(temp_dataView, what, expanded);
214 initialise(temp_data, dataPointShape, what, expanded);
215
216 m_protected=false;
217 }
218
219 Data::Data(const Data& inData)
220 : m_shared(false), m_lazy(false)
221 {
222 // m_data=inData.m_data;
223 set_m_data(inData.m_data);
224 m_protected=inData.isProtected();
225 }
226
227
228 Data::Data(const Data& inData,
229 const DataTypes::RegionType& region)
230 : m_shared(false), m_lazy(false)
231 {
232 DataAbstract_ptr dat=inData.m_data;
233 if (inData.isLazy())
234 {
235 dat=inData.m_data->resolve();
236 }
237 else
238 {
239 dat=inData.m_data;
240 }
241 //
242 // Create Data which is a slice of another Data
243 DataAbstract* tmp = dat->getSlice(region);
244 // m_data=DataAbstract_ptr(tmp);
245 set_m_data(DataAbstract_ptr(tmp));
246 m_protected=false;
247
248 }
249
// Interpolation constructor: produce a Data equivalent to inData but
// attached to 'functionspace'. If the function spaces already match the
// payload is simply shared; otherwise the data is interpolated.
Data::Data(const Data& inData,
	   const FunctionSpace& functionspace)
	: m_shared(false), m_lazy(false)
{
  if (inData.isEmpty())
  {
    throw DataException("Error - will not interpolate for instances of DataEmpty.");
  }
  if (inData.getFunctionSpace()==functionspace) {
    set_m_data(inData.m_data);
  }
  else
  {

    if (inData.isConstant()) {	// for a constant function, we just need to use the new function space
      if (!inData.probeInterpolation(functionspace))
      {	// Even though this is constant, we still need to check whether interpolation is allowed
	throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
      }
      // if the data is not lazy, this will just be a cast to DataReady
      DataReady_ptr dr=inData.m_data->resolve();
      DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
      set_m_data(DataAbstract_ptr(dc));
    } else {
      // Non-constant: interpolate into an expanded zero-filled target.
      Data tmp(0,inData.getDataPointShape(),functionspace,true);
      // Note: Must use a reference or pointer to a derived object
      // in order to get polymorphic behaviour. Shouldn't really
      // be able to create an instance of AbstractDomain but that was done
      // as a boost:python work around which may no longer be required.
      const_Domain_ptr inDataDomain=inData.getDomain();
      if (inDataDomain==functionspace.getDomain()) {
	inDataDomain->interpolateOnDomain(tmp,inData);
      } else {
	// target lives on a different domain: cross-domain interpolation
	inDataDomain->interpolateACross(tmp,inData);
      }
      set_m_data(tmp.m_data);
    }
  }
  m_protected=false;
}
294
295 Data::Data(DataAbstract* underlyingdata)
296 : m_shared(false), m_lazy(false)
297 {
298 // m_data=shared_ptr<DataAbstract>(underlyingdata);
299 m_data=underlyingdata->getPtr();
300 m_protected=false;
301 }
302
303 Data::Data(DataAbstract_ptr underlyingdata)
304 : m_shared(false), m_lazy(false)
305 {
306 // m_data=underlyingdata;
307 set_m_data(underlyingdata);
308 m_protected=false;
309 }
310
311 Data::Data(const DataTypes::ValueType& value,
312 const DataTypes::ShapeType& shape,
313 const FunctionSpace& what,
314 bool expanded)
315 : m_shared(false), m_lazy(false)
316 {
317 initialise(value,shape,what,expanded);
318 m_protected=false;
319 }
320
321
322 Data::Data(const object& value,
323 const FunctionSpace& what,
324 bool expanded)
325 : m_shared(false), m_lazy(false)
326 {
327 //numeric::array asNumArray(value);
328 WrappedArray w(value);
329 initialise(w,what,expanded);
330 m_protected=false;
331 }
332
333
// Construct a DataConstant from a python object, borrowing the function
// space (and, for scalars, the point shape) from 'other'.
Data::Data(const object& value,
	   const Data& other)
	: m_shared(false), m_lazy(false)
{
  WrappedArray w(value);

  // extract the shape of the numarray
  const DataTypes::ShapeType& tempShape=w.getShape();
  if (w.getRank()==0) {

    // scalar input: read the single value out of the wrapped array ...
    int len1 = DataTypes::noValues(tempShape);
    DataVector temp_data(len1, 0.0, len1);
    temp_data.copyFromArray(w,1);

    // ... then replicate it across a data point shaped like other's points
    int len = DataTypes::noValues(other.getDataPointShape());

    DataVector temp2_data(len, temp_data[0], len);
    DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
    set_m_data(DataAbstract_ptr(t));

  } else {
    //
    // Create a DataConstant with the same sample shape as other
    DataConstant* t=new DataConstant(w,other.getFunctionSpace());
    set_m_data(DataAbstract_ptr(t));
  }
  m_protected=false;
}
366
Data::~Data()
{
  // Passing a null pointer only deregisters this object as an owner of the
  // payload (see set_m_data below).
  set_m_data(DataAbstract_ptr());
}


// only call in thread safe contexts.
// This method should be atomic
void Data::set_m_data(DataAbstract_ptr p)
{
  // Swap in a new payload while maintaining the owner registry used for
  // copy-on-write. Note that when p is null, m_data is left pointing at the
  // old object (we are merely deregistered from it); that path is only
  // taken by the destructor.
  if (m_data.get()!=0)	// release old ownership
  {
	m_data->removeOwner(this);
  }
  m_shared=false;	// reset; sharing is re-established elsewhere if needed
  if (p.get()!=0)
  {
	m_data=p;
	m_data->addOwner(this);
	m_lazy=m_data->isLazy();	// cache laziness of the new payload
  }
}
389
390 void Data::initialise(const WrappedArray& value,
391 const FunctionSpace& what,
392 bool expanded)
393 {
394 //
395 // Construct a Data object of the appropriate type.
396 // Construct the object first as there seems to be a bug which causes
397 // undefined behaviour if an exception is thrown during construction
398 // within the shared_ptr constructor.
399 if (expanded) {
400 DataAbstract* temp=new DataExpanded(value, what);
401 // m_data=temp->getPtr();
402 set_m_data(temp->getPtr());
403 } else {
404 DataAbstract* temp=new DataConstant(value, what);
405 // m_data=temp->getPtr();
406 set_m_data(temp->getPtr());
407 }
408 }
409
410
411 void
412 Data::initialise(const DataTypes::ValueType& value,
413 const DataTypes::ShapeType& shape,
414 const FunctionSpace& what,
415 bool expanded)
416 {
417 //
418 // Construct a Data object of the appropriate type.
419 // Construct the object first as there seems to be a bug which causes
420 // undefined behaviour if an exception is thrown during construction
421 // within the shared_ptr constructor.
422 if (expanded) {
423 DataAbstract* temp=new DataExpanded(what, shape, value);
424 // m_data=temp->getPtr();
425 set_m_data(temp->getPtr());
426 } else {
427 DataAbstract* temp=new DataConstant(what, shape, value);
428 // m_data=temp->getPtr();
429 set_m_data(temp->getPtr());
430 }
431 }
432
433
434 // void
435 // Data::CompareDebug(const Data& rd)
436 // {
437 // using namespace std;
438 // bool mismatch=false;
439 // std::cout << "Comparing left and right" << endl;
440 // const DataTagged* left=dynamic_cast<DataTagged*>(m_data.get());
441 // const DataTagged* right=dynamic_cast<DataTagged*>(rd.m_data.get());
442 //
443 // if (left==0)
444 // {
445 // cout << "left arg is not a DataTagged\n";
446 // return;
447 // }
448 //
449 // if (right==0)
450 // {
451 // cout << "right arg is not a DataTagged\n";
452 // return;
453 // }
454 // cout << "Num elements=" << left->getVector().size() << ":" << right->getVector().size() << std::endl;
455 // cout << "Shapes ";
456 // if (left->getShape()==right->getShape())
457 // {
458 // cout << "ok\n";
459 // }
460 // else
461 // {
462 // cout << "Problem: shapes do not match\n";
463 // mismatch=true;
464 // }
465 // int lim=left->getVector().size();
466 // if (right->getVector().size()) lim=right->getVector().size();
467 // for (int i=0;i<lim;++i)
468 // {
469 // if (left->getVector()[i]!=right->getVector()[i])
470 // {
471 // cout << "[" << i << "] value mismatch " << left->getVector()[i] << ":" << right->getVector()[i] << endl;
472 // mismatch=true;
473 // }
474 // }
475 //
476 // // still need to check the tag map
477 // // also need to watch what is happening to function spaces, are they copied or what?
478 //
479 // const DataTagged::DataMapType& mapleft=left->getTagLookup();
480 // const DataTagged::DataMapType& mapright=right->getTagLookup();
481 //
482 // if (mapleft.size()!=mapright.size())
483 // {
484 // cout << "Maps are different sizes " << mapleft.size() << ":" << mapright.size() << endl;
485 // mismatch=true;
486 // cout << "Left map\n";
487 // DataTagged::DataMapType::const_iterator i,j;
488 // for (i=mapleft.begin();i!=mapleft.end();++i) {
489 // cout << "(" << i->first << "=>" << i->second << ")\n";
490 // }
491 // cout << "Right map\n";
492 // for (i=mapright.begin();i!=mapright.end();++i) {
493 // cout << "(" << i->first << "=>" << i->second << ")\n";
494 // }
495 // cout << "End map\n";
496 //
497 // }
498 //
499 // DataTagged::DataMapType::const_iterator i,j;
500 // for (i=mapleft.begin(),j=mapright.begin();i!=mapleft.end() && j!=mapright.end();++i,++j) {
501 // if ((i->first!=j->first) || (i->second!=j->second))
502 // {
503 // cout << "(" << i->first << "=>" << i->second << ")";
504 // cout << ":(" << j->first << "=>" << j->second << ") ";
505 // mismatch=true;
506 // }
507 // }
508 // if (mismatch)
509 // {
510 // cout << "#Mismatch\n";
511 // }
512 // }
513
// Wrap this object in the C-compatible struct used by the C interface.
// Only a raw pointer is stored; the struct does not own the Data.
escriptDataC
Data::getDataC()
{
  escriptDataC temp;
  temp.m_dataPtr=(void*)this;
  return temp;
}

// Const variant. Note the cast to void* silently discards constness;
// the C side is trusted not to modify the object through it.
escriptDataC
Data::getDataC() const
{
  escriptDataC temp;
  temp.m_dataPtr=(void*)this;
  return temp;
}
529
530 size_t
531 Data::getSampleBufferSize() const
532 {
533 return m_data->getSampleBufferSize();
534 }
535
536
537 const boost::python::tuple
538 Data::getShapeTuple() const
539 {
540 const DataTypes::ShapeType& shape=getDataPointShape();
541 switch(getDataPointRank()) {
542 case 0:
543 return make_tuple();
544 case 1:
545 return make_tuple(long_(shape[0]));
546 case 2:
547 return make_tuple(long_(shape[0]),long_(shape[1]));
548 case 3:
549 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
550 case 4:
551 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
552 default:
553 throw DataException("Error - illegal Data rank.");
554 }
555 }
556
557
558 // The different name is needed because boost has trouble with overloaded functions.
559 // It can't work out what type the function is based soley on its name.
560 // There are ways to fix this involving creating function pointer variables for each form
561 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
562 Data
563 Data::copySelf()
564 {
565 DataAbstract* temp=m_data->deepCopy();
566 return Data(temp);
567 }
568
569 void
570 Data::copy(const Data& other)
571 {
572 DataAbstract* temp=other.m_data->deepCopy();
573 DataAbstract_ptr p=temp->getPtr();
574 // m_data=p;
575 set_m_data(p);
576 }
577
578
579 Data
580 Data::delay()
581 {
582 DataLazy* dl=new DataLazy(m_data);
583 return Data(dl);
584 }
585
586 void
587 Data::delaySelf()
588 {
589 if (!isLazy())
590 {
591 // m_data=(new DataLazy(m_data))->getPtr();
592 set_m_data((new DataLazy(m_data))->getPtr());
593 }
594 }
595
596 void
597 Data::setToZero()
598 {
599 if (isEmpty())
600 {
601 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
602 }
603 exclusiveWrite();
604 m_data->setToZero();
605 }
606
// Copy values from 'other' into *this wherever the corresponding mask
// value is positive.
void
Data::copyWithMask(const Data& other,
		   const Data& mask)
{
  // 1. Interpolate if required so all Datas use the same FS as this
  // 2. Tag or Expand so that all Data's are the same type
  // 3. Iterate over the data vectors copying values where mask is >0
  if (other.isEmpty() || mask.isEmpty())
  {
	throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
  }
  // Work on resolved local copies so the callers' objects stay lazy/shared.
  Data other2(other);
  Data mask2(mask);
  other2.resolve();
  mask2.resolve();
  this->resolve();
  FunctionSpace myFS=getFunctionSpace();
  FunctionSpace oFS=other2.getFunctionSpace();
  FunctionSpace mFS=mask2.getFunctionSpace();
  if (oFS!=myFS)
  {
   if (other2.probeInterpolation(myFS))
   {
	other2=other2.interpolate(myFS);
   }
   else
   {
	throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
   }
  }
  if (mFS!=myFS)
  {
   if (mask2.probeInterpolation(myFS))
   {
	mask2=mask2.interpolate(myFS);
   }
   else
   {
	throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
   }
  }
  // Ensure that all args have the same type: promote everything to the
  // "widest" representation any of them uses (expanded > tagged > constant).
  if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
  {
	this->expand();
	other2.expand();
	mask2.expand();
  }
  else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
  {
	this->tag();
	other2.tag();
	mask2.tag();
  }
  else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
  {
	// all constant: the three vectors already line up element for element
  }
  else
  {
	throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
  }
  exclusiveWrite();	// we are about to mutate our vector in place
  // Now we iterate over the elements
  DataVector& self=getReady()->getVector();
  const DataVector& ovec=other2.getReady()->getVectorRO();
  const DataVector& mvec=mask2.getReady()->getVectorRO();
  if ((self.size()!=ovec.size()) || (self.size()!=mvec.size()))
  {
	throw DataException("Error - size mismatch in arguments to copyWithMask.");
  }
  size_t num_points=self.size();

  // OPENMP 3.0 allows unsigned loop vars.
  #if defined(_OPENMP) && (_OPENMP < 200805)
  long i;
  #else
  size_t i;
  #endif
  #pragma omp parallel for private(i) schedule(static)
  for (i=0;i<num_points;++i)
  {
	if (mvec[i]>0)
	{
	   self[i]=ovec[i];
	}
  }
}
694
695
696
697 bool
698 Data::isExpanded() const
699 {
700 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
701 return (temp!=0);
702 }
703
704 bool
705 Data::actsExpanded() const
706 {
707 return m_data->actsExpanded();
708
709 }
710
711
712 bool
713 Data::isTagged() const
714 {
715 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
716 return (temp!=0);
717 }
718
719 bool
720 Data::isEmpty() const
721 {
722 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
723 return (temp!=0);
724 }
725
726 bool
727 Data::isConstant() const
728 {
729 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
730 return (temp!=0);
731 }
732
733 bool
734 Data::isLazy() const
735 {
736 return m_data->isLazy();
737 }
738
739 // at the moment this is synonymous with !isLazy() but that could change
740 bool
741 Data::isReady() const
742 {
743 return (dynamic_cast<DataReady*>(m_data.get())!=0);
744 }
745
746
747 void
748 Data::setProtection()
749 {
750 m_protected=true;
751 }
752
753 bool
754 Data::isProtected() const
755 {
756 return m_protected;
757 }
758
759
760
// Convert the payload to DataExpanded (fully expanded storage).
// No-op when already expanded; throws for DataEmpty and unknown types.
void
Data::expand()
{
  if (isConstant()) {
    DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataConst);
    set_m_data(temp->getPtr());
  } else if (isTagged()) {
    DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataTag);
    set_m_data(temp->getPtr());
  } else if (isExpanded()) {
    //
    // do nothing
  } else if (isEmpty()) {
    throw DataException("Error - Expansion of DataEmpty not possible.");
  } else if (isLazy()) {
    resolve();
    expand();	// resolve might not give us expanded data
  } else {
    throw DataException("Error - Expansion not implemented for this Data type.");
  }
}
786
787 void
788 Data::tag()
789 {
790 if (isConstant()) {
791 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
792 DataAbstract* temp=new DataTagged(*tempDataConst);
793 // m_data=temp->getPtr();
794 set_m_data(temp->getPtr());
795 } else if (isTagged()) {
796 // do nothing
797 } else if (isExpanded()) {
798 throw DataException("Error - Creating tag data from DataExpanded not possible.");
799 } else if (isEmpty()) {
800 throw DataException("Error - Creating tag data from DataEmpty not possible.");
801 } else if (isLazy()) {
802 DataAbstract_ptr res=m_data->resolve();
803 if (m_data->isExpanded())
804 {
805 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
806 }
807 // m_data=res;
808 set_m_data(res);
809 tag();
810 } else {
811 throw DataException("Error - Tagging not implemented for this Data type.");
812 }
813 }
814
815 void
816 Data::resolve()
817 {
818 if (isLazy())
819 {
820 // m_data=m_data->resolve();
821 set_m_data(m_data->resolve());
822 }
823 }
824
825 void
826 Data::requireWrite()
827 {
828 // if (omp_in_parallel())
829 // {
830 // throw DataException("Programming error. Please do not run requireWrite() in multi-threaded sections.");
831 // }
832 resolve();
833 exclusiveWrite();
834 }
835
// Pointwise reciprocal (1/x). May return a lazy node - see MAKELAZYOP.
Data
Data::oneOver() const
{
  MAKELAZYOP(RECIP)
  return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
}

// Pointwise test: value > 0.
Data
Data::wherePositive() const
{
  MAKELAZYOP(GZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
}

// Pointwise test: value < 0.
Data
Data::whereNegative() const
{
  MAKELAZYOP(LZ)
  return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
}

// Pointwise test: value >= 0.
Data
Data::whereNonNegative() const
{
  MAKELAZYOP(GEZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
}

// Pointwise test: value <= 0.
Data
Data::whereNonPositive() const
{
  MAKELAZYOP(LEZ)
  return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
}

// Pointwise test: |value| <= tol.
Data
Data::whereZero(double tol) const
{
  MAKELAZYOPOFF(EZ,tol)
  return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
}

// Pointwise test: |value| > tol.
Data
Data::whereNonZero(double tol) const
{
  MAKELAZYOPOFF(NEZ,tol)
  return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
}
890
891 Data
892 Data::interpolate(const FunctionSpace& functionspace) const
893 {
894 return Data(*this,functionspace);
895 }
896
897 bool
898 Data::probeInterpolation(const FunctionSpace& functionspace) const
899 {
900 return getFunctionSpace().probeInterpolation(functionspace);
901 }
902
// Gradient evaluated on the given function space. The result's point shape
// is this object's shape with a trailing dimension of size
// functionspace.getDim().
Data
Data::gradOn(const FunctionSpace& functionspace) const
{
  if (isEmpty())
  {
	throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  double blocktimer_start = blocktimer_time();	// wall-clock accounting
  if (functionspace.getDomain()!=getDomain())
    throw DataException("Error - gradient cannot be calculated on different domains.");
  DataTypes::ShapeType grad_shape=getDataPointShape();
  grad_shape.push_back(functionspace.getDim());
  Data out(0.0,grad_shape,functionspace,true);	// expanded target, filled by the domain
  getDomain()->setToGradient(out,*this);
  blocktimer_increment("grad()", blocktimer_start);
  return out;
}

// Gradient on the default 'function' space of this object's domain.
Data
Data::grad() const
{
  if (isEmpty())
  {
	throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  return gradOn(escript::function(*getDomain()));
}
930
931 int
932 Data::getDataPointSize() const
933 {
934 return m_data->getNoValues();
935 }
936
937 DataTypes::ValueType::size_type
938 Data::getLength() const
939 {
940 return m_data->getLength();
941 }
942
943 const
944 boost::python::numeric :: array
945 Data:: getValueOfDataPoint(int dataPointNo)
946 {
947 return boost::python::numeric::array(getValueOfDataPointAsTuple(dataPointNo));
948 }
949
// Return the values of data point 'dataPointNo' (numbered consecutively
// across all samples) as a nested python tuple shaped like the data point.
const boost::python::object
Data::getValueOfDataPointAsTuple(int dataPointNo)
{
  FORCERESOLVE	// extraction needs concrete data
  if (getNumDataPointsPerSample()>0) {
       // split the flat index into (sample, point-within-sample)
       int sampleNo = dataPointNo/getNumDataPointsPerSample();
       int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
       //
       // Check a valid sample number has been supplied
       if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
           throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
       }

       //
       // Check a valid data point number has been supplied
       if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
           throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
       }
       // TODO: global error handling
       DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
       return pointToTuple(getDataPointShape(),&(getDataAtOffset(offset)));
  }
  else
  {
       // The old method would return an empty numArray of the given shape
       // I'm going to throw an exception because if we have zero points per sample we have problems
       throw DataException("Error - need at least 1 datapoint per sample.");
  }
}
980
981
982 void
983 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
984 {
985 // this will throw if the value cannot be represented
986 setValueOfDataPointToArray(dataPointNo,py_object);
987 }
988
989 void
990 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
991 {
992 if (isProtected()) {
993 throw DataException("Error - attempt to update protected Data object.");
994 }
995 FORCERESOLVE;
996
997 WrappedArray w(obj);
998 //
999 // check rank
1000 if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
1001 throw DataException("Rank of numarray does not match Data object rank");
1002
1003 //
1004 // check shape of num_array
1005 for (unsigned int i=0; i<getDataPointRank(); i++) {
1006 if (w.getShape()[i]!=getDataPointShape()[i])
1007 throw DataException("Shape of numarray does not match Data object rank");
1008 }
1009 //
1010 // make sure data is expanded:
1011 //
1012 if (!isExpanded()) {
1013 expand();
1014 }
1015 if (getNumDataPointsPerSample()>0) {
1016 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1017 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1018 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
1019 } else {
1020 m_data->copyToDataPoint(-1, 0,w);
1021 }
1022 }
1023
1024 void
1025 Data::setValueOfDataPoint(int dataPointNo, const double value)
1026 {
1027 if (isProtected()) {
1028 throw DataException("Error - attempt to update protected Data object.");
1029 }
1030 //
1031 // make sure data is expanded:
1032 FORCERESOLVE;
1033 if (!isExpanded()) {
1034 expand();
1035 }
1036 if (getNumDataPointsPerSample()>0) {
1037 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1038 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1039 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
1040 } else {
1041 m_data->copyToDataPoint(-1, 0,value);
1042 }
1043 }
1044
1045 const
1046 boost::python::numeric::array
1047 Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
1048 {
1049 return boost::python::numeric::array(getValueOfGlobalDataPointAsTuple(procNo, dataPointNo));
1050 }
1051
1052 const
1053 boost::python::object
1054 Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
1055 {
1056 // This could be lazier than it is now
1057 FORCERESOLVE;
1058
1059 // copy datapoint into a buffer
1060 // broadcast buffer to all nodes
1061 // convert buffer to tuple
1062 // return tuple
1063
1064 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
1065 size_t length=DataTypes::noValues(dataPointShape);
1066
1067 // added for the MPI communication
1068 double *tmpData = new double[length];
1069
1070 // updated for the MPI case
1071 if( get_MPIRank()==procNo ){
1072 if (getNumDataPointsPerSample()>0) {
1073 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1074 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1075 //
1076 // Check a valid sample number has been supplied
1077 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1078 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1079 }
1080
1081 //
1082 // Check a valid data point number has been supplied
1083 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1084 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1085 }
1086 // TODO: global error handling
1087 // create a view of the data if it is stored locally
1088 //DataArrayView dataPointView = getDataPoint(sampleNo, dataPointNoInSample);
1089 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1090
1091 memcpy(tmpData,&(getDataAtOffset(offset)),length*sizeof(double));
1092 }
1093 }
1094 #ifdef PASO_MPI
1095 // broadcast the data to all other processes
1096 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1097 #endif
1098
1099 boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
1100 delete [] tmpData;
1101 //
1102 // return the loaded array
1103 return t;
1104
1105 }
1106
1107
1108 boost::python::object
1109 Data::integrateToTuple_const() const
1110 {
1111 if (isLazy())
1112 {
1113 throw DataException("Error - cannot integrate for constant lazy data.");
1114 }
1115 return integrateWorker();
1116 }
1117
1118 boost::python::object
1119 Data::integrateToTuple()
1120 {
1121 if (isLazy())
1122 {
1123 expand();
1124 }
1125 return integrateWorker();
1126
1127 }
1128
1129
1130 boost::python::object
1131 Data::integrate_const() const
1132 {
1133 if (isLazy())
1134 {
1135 throw DataException("Error - cannot integrate for constant lazy data.");
1136 }
1137 return boost::python::numeric::array(integrateWorker());
1138 }
1139
1140 boost::python::object
1141 Data::integrate()
1142 {
1143 if (isLazy())
1144 {
1145 expand();
1146 }
1147 return boost::python::numeric::array(integrateWorker());
1148 }
1149
1150
1151
// Shared implementation behind the integrate* entry points.
// Asks the (continuous) domain to compute per-component integrals and, in
// MPI builds, sums the partial integrals across all ranks before packing
// the result into a python tuple.
boost::python::object
Data::integrateWorker() const
{
  DataTypes::ShapeType shape = getDataPointShape();
  int dataPointSize = getDataPointSize();

  //
  // calculate the integral values
  vector<double> integrals(dataPointSize);
  vector<double> integrals_local(dataPointSize);
  // integration is only defined on continuous domains
  const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
  if (dom==0)
  {
    throw DataException("Can not integrate over non-continuous domains.");
  }
#ifdef PASO_MPI
  dom->setToIntegrals(integrals_local,*this);
  // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
  double *tmp = new double[dataPointSize];
  double *tmp_local = new double[dataPointSize];
  for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
  // NOTE(review): this reduction uses MPI_COMM_WORLD while other methods in
  // this file use get_MPIComm() - confirm these are the same communicator.
  MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
  for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
  tuple result=pointToTuple(shape,tmp);
  delete[] tmp;
  delete[] tmp_local;
#else
  dom->setToIntegrals(integrals,*this);
/*  double *tmp = new double[dataPointSize];
  for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; }*/
  tuple result=pointToTuple(shape,integrals);
//  delete tmp;
#endif


  return result;
}
1189
// Pointwise trigonometric/hyperbolic operations.
// MAKELAZYOP(X) returns a deferred DataLazy node instead of computing
// immediately when this object is lazy (or AUTOLAZY is enabled on
// expanded data); otherwise the C math function is applied per element.

// Pointwise sine.
Data
Data::sin() const
{
  MAKELAZYOP(SIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

// Pointwise cosine.
Data
Data::cos() const
{
  MAKELAZYOP(COS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

// Pointwise tangent.
Data
Data::tan() const
{
  MAKELAZYOP(TAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

// Pointwise arcsine.
Data
Data::asin() const
{
  MAKELAZYOP(ASIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

// Pointwise arccosine.
Data
Data::acos() const
{
  MAKELAZYOP(ACOS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}


// Pointwise arctangent.
Data
Data::atan() const
{
  MAKELAZYOP(ATAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

// Pointwise hyperbolic sine.
Data
Data::sinh() const
{
  MAKELAZYOP(SINH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

// Pointwise hyperbolic cosine.
Data
Data::cosh() const
{
  MAKELAZYOP(COSH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

// Pointwise hyperbolic tangent.
Data
Data::tanh() const
{
  MAKELAZYOP(TANH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}
1253
1254
// Pointwise error function.  Not available with the MSVC runtime
// (no ::erf), hence the _WIN32 guard.
Data
Data::erf() const
{
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
  MAKELAZYOP(ERF)
  return C_TensorUnaryOperation(*this, ::erf);
#endif
}

// Pointwise inverse hyperbolic sine.  On _WIN32 (non-Intel) the C99
// ::asinh is unavailable, so a local substitute is used instead.
Data
Data::asinh() const
{
  MAKELAZYOP(ASINH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::asinh);
#endif
}

// Pointwise inverse hyperbolic cosine (with _WIN32 substitute, as above).
Data
Data::acosh() const
{
  MAKELAZYOP(ACOSH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::acosh);
#endif
}

// Pointwise inverse hyperbolic tangent (with _WIN32 substitute, as above).
Data
Data::atanh() const
{
  MAKELAZYOP(ATANH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::atanh);
#endif
}
1298
// Pointwise base-10 logarithm.
Data
Data::log10() const
{
  MAKELAZYOP(LOG10)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}

// Pointwise natural logarithm.
Data
Data::log() const
{
  MAKELAZYOP(LOG)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}

// Pointwise sign (via escript::fsign).
Data
Data::sign() const
{
  MAKELAZYOP(SIGN)
  return C_TensorUnaryOperation(*this, escript::fsign);
}

// Pointwise absolute value.
Data
Data::abs() const
{
  MAKELAZYOP(ABS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}

// Pointwise negation.
Data
Data::neg() const
{
  MAKELAZYOP(NEG)
  return C_TensorUnaryOperation(*this, negate<double>());
}

// Identity (unary +): returns a deep copy of this object.
Data
Data::pos() const
{
	// not doing lazy check here is deliberate.
	// since a deep copy of lazy data should be cheap, I'll just let it happen now
  Data result;
  // perform a deep copy
  result.copy(*this);
  return result;
}

// Pointwise exponential.
Data
Data::exp() const
{
  MAKELAZYOP(EXP)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}

// Pointwise square root.
Data
Data::sqrt() const
{
  MAKELAZYOP(SQRT)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}
1358
// Global reductions: Lsup (largest absolute value), sup (maximum) and
// inf (minimum).  The _const variants cannot resolve lazy data in place
// and so reject it; the non-const variants resolve() first.
double
Data::Lsup_const() const
{
   if (isLazy())
   {
	throw DataException("Error - cannot compute Lsup for constant lazy data.");
   }
   return LsupWorker();
}

double
Data::Lsup()
{
   if (isLazy())
   {
	resolve();
   }
   return LsupWorker();
}

double
Data::sup_const() const
{
   if (isLazy())
   {
	throw DataException("Error - cannot compute sup for constant lazy data.");
   }
   return supWorker();
}

double
Data::sup()
{
   if (isLazy())
   {
	resolve();
   }
   return supWorker();
}

double
Data::inf_const() const
{
   if (isLazy())
   {
	throw DataException("Error - cannot compute inf for constant lazy data.");
   }
   return infWorker();
}

double
Data::inf()
{
   if (isLazy())
   {
	resolve();
   }
   return infWorker();
}
1418
// Largest absolute value over all local data points (0 if there are none);
// in MPI builds the local maxima are combined with MPI_MAX.
double
Data::LsupWorker() const
{
  double localValue;
  //
  // set the initial absolute maximum value to zero

  AbsMax abs_max_func;
  localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

// Maximum value over all local data points, MPI_MAX-reduced in MPI builds.
double
Data::supWorker() const
{
  double localValue;
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

// Minimum value over all local data points, MPI_MIN-reduced in MPI builds.
double
Data::infWorker() const
{
  double localValue;
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1470
/* TODO */
/* global reduction */
// Per-data-point maximum (dp_algorithm reduces within each data point,
// not across processes - hence the TODO above about a global reduction).
Data
Data::maxval() const
{
  if (isLazy())
  {
	Data temp(*this);	// to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.maxval();
  }
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
}

// Per-data-point minimum; same caveats as maxval().
Data
Data::minval() const
{
  if (isLazy())
  {
	Data temp(*this);	// to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.minval();
  }
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  return dp_algorithm(fmin_func,numeric_limits<double>::max());
}
1502
1503 Data
1504 Data::swapaxes(const int axis0, const int axis1) const
1505 {
1506 if (isLazy())
1507 {
1508 Data temp(*this);
1509 temp.resolve();
1510 return temp.swapaxes(axis0,axis1);
1511 }
1512 int axis0_tmp,axis1_tmp;
1513 DataTypes::ShapeType s=getDataPointShape();
1514 DataTypes::ShapeType ev_shape;
1515 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1516 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1517 int rank=getDataPointRank();
1518 if (rank<2) {
1519 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1520 }
1521 if (axis0<0 || axis0>rank-1) {
1522 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1523 }
1524 if (axis1<0 || axis1>rank-1) {
1525 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1526 }
1527 if (axis0 == axis1) {
1528 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1529 }
1530 if (axis0 > axis1) {
1531 axis0_tmp=axis1;
1532 axis1_tmp=axis0;
1533 } else {
1534 axis0_tmp=axis0;
1535 axis1_tmp=axis1;
1536 }
1537 for (int i=0; i<rank; i++) {
1538 if (i == axis0_tmp) {
1539 ev_shape.push_back(s[axis1_tmp]);
1540 } else if (i == axis1_tmp) {
1541 ev_shape.push_back(s[axis0_tmp]);
1542 } else {
1543 ev_shape.push_back(s[i]);
1544 }
1545 }
1546 Data ev(0.,ev_shape,getFunctionSpace());
1547 ev.typeMatchRight(*this);
1548 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1549 return ev;
1550
1551 }
1552
1553 Data
1554 Data::symmetric() const
1555 {
1556 // check input
1557 DataTypes::ShapeType s=getDataPointShape();
1558 if (getDataPointRank()==2) {
1559 if(s[0] != s[1])
1560 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1561 }
1562 else if (getDataPointRank()==4) {
1563 if(!(s[0] == s[2] && s[1] == s[3]))
1564 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1565 }
1566 else {
1567 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1568 }
1569 MAKELAZYOP(SYM)
1570 Data ev(0.,getDataPointShape(),getFunctionSpace());
1571 ev.typeMatchRight(*this);
1572 m_data->symmetric(ev.m_data.get());
1573 return ev;
1574 }
1575
1576 Data
1577 Data::nonsymmetric() const
1578 {
1579 MAKELAZYOP(NSYM)
1580 // check input
1581 DataTypes::ShapeType s=getDataPointShape();
1582 if (getDataPointRank()==2) {
1583 if(s[0] != s[1])
1584 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1585 DataTypes::ShapeType ev_shape;
1586 ev_shape.push_back(s[0]);
1587 ev_shape.push_back(s[1]);
1588 Data ev(0.,ev_shape,getFunctionSpace());
1589 ev.typeMatchRight(*this);
1590 m_data->nonsymmetric(ev.m_data.get());
1591 return ev;
1592 }
1593 else if (getDataPointRank()==4) {
1594 if(!(s[0] == s[2] && s[1] == s[3]))
1595 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1596 DataTypes::ShapeType ev_shape;
1597 ev_shape.push_back(s[0]);
1598 ev_shape.push_back(s[1]);
1599 ev_shape.push_back(s[2]);
1600 ev_shape.push_back(s[3]);
1601 Data ev(0.,ev_shape,getFunctionSpace());
1602 ev.typeMatchRight(*this);
1603 m_data->nonsymmetric(ev.m_data.get());
1604 return ev;
1605 }
1606 else {
1607 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1608 }
1609 }
1610
/**
 * \brief Trace of each data point: sums over a pair of indices selected by
 * axis_offset, producing a result of rank-2 lower than the input.
 * Supported input ranks are 2, 3 and 4.
 */
Data
Data::trace(int axis_offset) const
{
     MAKELAZYOPOFF(TRACE,axis_offset)
     // NOTE(review): this guard accepts axis_offset values up to rank although
     // the message says "between 0 and rank-2 inclusive" - confirm which
     // bound is actually intended.
     if ((axis_offset<0) || (axis_offset>getDataPointRank()))
     {
	throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
     }
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()==2) {
        // rank-2 input -> scalar result
        DataTypes::ShapeType ev_shape;
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
     }
     if (getDataPointRank()==3) {
        // rank-3 input -> rank-1 result; the surviving dimension depends on axis_offset
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
          int s2=s[2];
          ev_shape.push_back(s2);
        }
        else if (axis_offset==1) {
          int s0=s[0];
          ev_shape.push_back(s0);
        }
        // NOTE(review): any other axis_offset falls through with an empty
        // (scalar) ev_shape here - confirm that is intended.
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
     }
     if (getDataPointRank()==4) {
        // rank-4 input -> rank-2 result; the two surviving dimensions depend on axis_offset
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
          ev_shape.push_back(s[2]);
          ev_shape.push_back(s[3]);
        }
        else if (axis_offset==1) {
          ev_shape.push_back(s[0]);
          ev_shape.push_back(s[3]);
        }
	else if (axis_offset==2) {
	  ev_shape.push_back(s[0]);
	  ev_shape.push_back(s[1]);
	}
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
	m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
     }
     else {
        throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
     }
}
1665
1666 Data
1667 Data::transpose(int axis_offset) const
1668 {
1669 MAKELAZYOPOFF(TRANS,axis_offset)
1670 DataTypes::ShapeType s=getDataPointShape();
1671 DataTypes::ShapeType ev_shape;
1672 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1673 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1674 int rank=getDataPointRank();
1675 if (axis_offset<0 || axis_offset>rank) {
1676 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1677 }
1678 for (int i=0; i<rank; i++) {
1679 int index = (axis_offset+i)%rank;
1680 ev_shape.push_back(s[index]); // Append to new shape
1681 }
1682 Data ev(0.,ev_shape,getFunctionSpace());
1683 ev.typeMatchRight(*this);
1684 m_data->transpose(ev.m_data.get(), axis_offset);
1685 return ev;
1686 }
1687
1688 Data
1689 Data::eigenvalues() const
1690 {
1691 if (isLazy())
1692 {
1693 Data temp(*this); // to get around the fact that you can't resolve a const Data
1694 temp.resolve();
1695 return temp.eigenvalues();
1696 }
1697 // check input
1698 DataTypes::ShapeType s=getDataPointShape();
1699 if (getDataPointRank()!=2)
1700 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1701 if(s[0] != s[1])
1702 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1703 // create return
1704 DataTypes::ShapeType ev_shape(1,s[0]);
1705 Data ev(0.,ev_shape,getFunctionSpace());
1706 ev.typeMatchRight(*this);
1707 m_data->eigenvalues(ev.m_data.get());
1708 return ev;
1709 }
1710
/**
 * \brief Compute eigenvalues and eigenvectors of each (square, rank-2)
 * data point, returned as a python tuple (eigenvalues, eigenvectors).
 * \param tol tolerance passed through to the underlying solver
 */
const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
     if (isLazy())
     {
	Data temp(*this);	// to get around the fact that you can't resolve a const Data
	temp.resolve();
	return temp.eigenvalues_and_eigenvectors(tol);
     }
     // check input: must be a square rank-2 object
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
     if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
     // create return: ev holds the eigenvalues (rank 1), V the eigenvectors (rank 2)
     DataTypes::ShapeType ev_shape(1,s[0]);
     Data ev(0.,ev_shape,getFunctionSpace());
     ev.typeMatchRight(*this);
     DataTypes::ShapeType V_shape(2,s[0]);
     Data V(0.,V_shape,getFunctionSpace());
     V.typeMatchRight(*this);
     m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
     return make_tuple(boost::python::object(ev),boost::python::object(V));
}
1735
/**
 * \brief Return (processor number, data point number) of the data point
 * holding the global minimum value, as a python tuple.
 */
const boost::python::tuple
Data::minGlobalDataPoint() const
{
  // NB: calc_minGlobalDataPoint( had to be split off from minGlobalDataPoint( as boost::make_tuple causes an
  // abort (for unknown reasons) if there are openmp directives with it in the
  // surrounding function

  int DataPointNo;
  int ProcNo;
  calc_minGlobalDataPoint(ProcNo,DataPointNo);
  return make_tuple(ProcNo,DataPointNo);
}
1748
1749 void
1750 Data::calc_minGlobalDataPoint(int& ProcNo,
1751 int& DataPointNo) const
1752 {
1753 if (isLazy())
1754 {
1755 Data temp(*this); // to get around the fact that you can't resolve a const Data
1756 temp.resolve();
1757 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1758 }
1759 int i,j;
1760 int lowi=0,lowj=0;
1761 double min=numeric_limits<double>::max();
1762
1763 Data temp=minval();
1764
1765 int numSamples=temp.getNumSamples();
1766 int numDPPSample=temp.getNumDataPointsPerSample();
1767
1768 double next,local_min;
1769 int local_lowi=0,local_lowj=0;
1770
1771 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1772 {
1773 local_min=min;
1774 #pragma omp for private(i,j) schedule(static)
1775 for (i=0; i<numSamples; i++) {
1776 for (j=0; j<numDPPSample; j++) {
1777 next=temp.getDataAtOffset(temp.getDataOffset(i,j));
1778 if (next<local_min) {
1779 local_min=next;
1780 local_lowi=i;
1781 local_lowj=j;
1782 }
1783 }
1784 }
1785 #pragma omp critical
1786 if (local_min<min) {
1787 min=local_min;
1788 lowi=local_lowi;
1789 lowj=local_lowj;
1790 }
1791 }
1792
1793 #ifdef PASO_MPI
1794 // determine the processor on which the minimum occurs
1795 next = temp.getDataPoint(lowi,lowj);
1796 int lowProc = 0;
1797 double *globalMins = new double[get_MPISize()+1];
1798 int error;
1799 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1800
1801 if( get_MPIRank()==0 ){
1802 next = globalMins[lowProc];
1803 for( i=1; i<get_MPISize(); i++ )
1804 if( next>globalMins[i] ){
1805 lowProc = i;
1806 next = globalMins[i];
1807 }
1808 }
1809 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1810
1811 delete [] globalMins;
1812 ProcNo = lowProc;
1813 #else
1814 ProcNo = 0;
1815 #endif
1816 DataPointNo = lowj + lowi * numDPPSample;
1817 }
1818
/**
 * \brief Write this Data object to an OpenDX file via the domain's saveDX.
 * Lazy data is resolved on a temporary copy first; DataEmpty is rejected.
 */
void
Data::saveDX(std::string fileName) const
{
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
     Data temp(*this);	// to get around the fact that you can't resolve a const Data
     temp.resolve();
     temp.saveDX(fileName);
     return;
  }
  // hand ourselves to the domain as the single "data" entry
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveDX(fileName,args);
  return;
}

/**
 * \brief Write this Data object to a VTK file via the domain's saveVTK.
 * Lazy data is resolved on a temporary copy first; DataEmpty is rejected.
 */
void
Data::saveVTK(std::string fileName) const
{
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
     Data temp(*this);	// to get around the fact that you can't resolve a const Data
     temp.resolve();
     temp.saveVTK(fileName);
     return;
  }
  // hand ourselves to the domain as the single "data" entry
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveVTK(fileName,args);
  return;
}
1858
// In-place arithmetic operators.  Each one rejects protected objects,
// defers to a lazy node via MAKELAZYBINSELF when applicable, and otherwise
// takes exclusive ownership of the underlying data before mutating it.
Data&
Data::operator+=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,ADD)	// for lazy + is equivalent to +=
  exclusiveWrite();			// Since Lazy data does not modify its leaves we only need to worry here
  binaryOp(right,plus<double>());
  return (*this);
}

Data&
Data::operator+=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  // promote the python operand to a Data object on our function space
  Data tmp(right,getFunctionSpace(),false);
  (*this)+=tmp;
  return *this;
}

// Hmmm, operator= makes a deep copy but the copy constructor does not?
Data&
Data::operator=(const Data& other)
{
#if defined ASSIGNMENT_MEANS_DEEPCOPY
// This should not be used.
// Just leaving this here until I have completed transition
  copy(other);
#else
  m_protected=false;		// since any changes should be caught by exclusiveWrite();
//   m_data=other.m_data;
  set_m_data(other.m_data);
#endif
  return (*this);
}

Data&
Data::operator-=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,SUB)
  exclusiveWrite();
  binaryOp(right,minus<double>());
  return (*this);
}

Data&
Data::operator-=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  // promote the python operand to a Data object on our function space
  Data tmp(right,getFunctionSpace(),false);
  (*this)-=tmp;
  return (*this);
}

Data&
Data::operator*=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,MUL)
  exclusiveWrite();
  binaryOp(right,multiplies<double>());
  return (*this);
}

Data&
Data::operator*=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  // promote the python operand to a Data object on our function space
  Data tmp(right,getFunctionSpace(),false);
  (*this)*=tmp;
  return (*this);
}

Data&
Data::operator/=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,DIV)
  exclusiveWrite();
  binaryOp(right,divides<double>());
  return (*this);
}

Data&
Data::operator/=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  // promote the python operand to a Data object on our function space
  Data tmp(right,getFunctionSpace(),false);
  (*this)/=tmp;
  return (*this);
}
1966
// Reverse power: left ** (*this), with the python operand promoted to Data.
Data
Data::rpowO(const boost::python::object& left) const
{
  Data left_d(left,*this);
  return left_d.powD(*this);
}

// Power with a python exponent: (*this) ** right.
Data
Data::powO(const boost::python::object& right) const
{
  Data tmp(right,getFunctionSpace(),false);
  return powD(tmp);
}

// Pointwise power of two Data objects; may be deferred lazily.
Data
Data::powD(const Data& right) const
{
  MAKELAZYBIN(right,POW)
  return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
}
1987
//
// Free binary arithmetic operators.  Python operands are first promoted to
// Data objects on the other operand's function space; MAKELAZYBIN2 defers
// the operation to a lazy node when applicable.
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,ADD)
  return C_TensorBinaryOperation(left, right, plus<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,SUB)
  return C_TensorBinaryOperation(left, right, minus<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,MUL)
  return C_TensorBinaryOperation(left, right, multiplies<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,DIV)
  return C_TensorBinaryOperation(left, right, divides<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const Data& left, const boost::python::object& right)
{
  // promote the python operand to Data on left's function space
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,ADD)
  return left+tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,SUB)
  return left-tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,MUL)
  return left*tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,DIV)
  return left/tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const boost::python::object& left, const Data& right)
{
  // promote the python operand to Data on right's function space
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,ADD)
  return tmp+right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,SUB)
  return tmp-right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,MUL)
  return tmp*right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,DIV)
  return tmp/right;
}
2103
2104
/* TODO */
/* global reduction */
// python __getitem__: translate the python key into a slice region and
// return the corresponding slice of this object.
Data
Data::getItem(const boost::python::object& key) const
{

  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);

  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }

  return getSlice(slice_region);
}

/* TODO */
/* global reduction */
// Return a new Data object covering the given region of each data point.
Data
Data::getSlice(const DataTypes::RegionType& region) const
{
  return Data(*this,region);
}
2127
/* TODO */
/* global reduction */
// python __setitem__ with a python value: promote the value to a Data
// object on our function space, then delegate to setItemD.
void
Data::setItemO(const boost::python::object& key,
               const boost::python::object& value)
{
  Data tempData(value,getFunctionSpace());
  setItemD(key,tempData);
}

// python __setitem__ with a Data value: translate the key to a slice
// region and write the value into it (interpolating to our function
// space first if the spaces differ).
void
Data::setItemD(const boost::python::object& key,
               const Data& value)
{
  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }
  exclusiveWrite();
  if (getFunctionSpace()!=value.getFunctionSpace()) {
     setSlice(Data(value,getFunctionSpace()),slice_region);
  } else {
     setSlice(value,slice_region);
  }
}

// Overwrite the given region of each data point with `value`.
// Resolves lazy data first and matches representations in both directions
// before delegating to the underlying DataReady object.
void
Data::setSlice(const Data& value,
               const DataTypes::RegionType& region)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  FORCERESOLVE;
/*  if (isLazy())
  {
	throw DataException("Error - setSlice not permitted on lazy data.");
  }*/
  exclusiveWrite();		// In case someone finds a way to call this without going through setItemD
  Data tempValue(value);
  typeMatchLeft(tempValue);
  typeMatchRight(tempValue);
  getReady()->setSlice(tempValue.m_data.get(),region);
}
2172
// Promote `right`'s representation so it is at least as general as ours:
// lazy right is resolved (unless we are lazy too); expanded here forces
// right to expand; tagged here upgrades a constant right to tagged.
void
Data::typeMatchLeft(Data& right) const
{
  if (right.isLazy() && !isLazy())
  {
    right.resolve();
  }
  if (isExpanded()){
    right.expand();
  } else if (isTagged()) {
    if (right.isConstant()) {
      right.tag();
    }
  }
}

// Promote our representation so it is at least as general as `right`'s:
// we resolve if lazy (unless right is lazy too); we expand if right is
// expanded; a constant upgrades to tagged if right is tagged.
void
Data::typeMatchRight(const Data& right)
{
  if (isLazy() && !right.isLazy())
  {
    resolve();
  }
  if (isTagged()) {
    if (right.isExpanded()) {
      expand();
    }
  } else if (isConstant()) {
    if (right.isExpanded()) {
      expand();
    } else if (right.isTagged()) {
      tag();
    }
  }
}
2208
// Set the value associated with a named tag.
// NOTE: silently does nothing when `name` is not a valid tag name for
// this domain.
void
Data::setTaggedValueByName(std::string name,
                           const boost::python::object& value)
{
     if (getFunctionSpace().getDomain()->isValidTagName(name)) {
        FORCERESOLVE;
        exclusiveWrite();
        int tagKey=getFunctionSpace().getDomain()->getTag(name);
        setTaggedValue(tagKey,value);
     }
}

// Set the value associated with tagKey from a python object.
// The object is wrapped, copied into a DataVector and handed to the
// underlying (tagged) data representation.
void
Data::setTaggedValue(int tagKey,
                     const boost::python::object& value)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // Ensure underlying data object is of type DataTagged
  FORCERESOLVE;
  exclusiveWrite();
  if (isConstant()) tag();
  WrappedArray w(value);

  DataVector temp_data2;
  temp_data2.copyFromArray(w,1);

  m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
}


// Set the value associated with tagKey from C++ data.
// \param pointshape shape of the supplied value
// \param value vector holding the point values
// \param dataOffset offset of the first value within `value`
void
Data::setTaggedValueFromCPP(int tagKey,
                            const DataTypes::ShapeType& pointshape,
                            const DataTypes::ValueType& value,
                            int dataOffset)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // Ensure underlying data object is of type DataTagged
  FORCERESOLVE;
  if (isConstant()) tag();
  exclusiveWrite();
  //
  // Call DataAbstract::setTaggedValue
  m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
}
2260
2261 int
2262 Data::getTagNumber(int dpno)
2263 {
2264 if (isEmpty())
2265 {
2266 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2267 }
2268 return getFunctionSpace().getTagFromDataPointNo(dpno);
2269 }
2270
2271
2272 ostream& escript::operator<<(ostream& o, const Data& data)
2273 {
2274 o << data.toString();
2275 return o;
2276 }
2277
// General tensor product of two Data objects:
//   res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
// where SM is the product of the last axis_offset entries in arg_0's
// (possibly transposed) shape.  The nine branches below cover every
// combination of Constant/Tagged/Expanded inputs; the heavy lifting is
// always done by matrix_matrix_product on flattened (SL,SM,SR) blocks.
// transpose: 0 = neither argument transposed, 1 = arg_0, 2 = arg_1.
Data
escript::C_GeneralTensorProduct(Data& arg_0,
                     Data& arg_1,
                     int axis_offset,
                     int transpose)
{
  // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
  // SM is the product of the last axis_offset entries in arg_0.getShape().

  // deal with any lazy data
//   if (arg_0.isLazy()) {arg_0.resolve();}
//   if (arg_1.isLazy()) {arg_1.resolve();}
  // If either input is lazy (or AUTOLAZY is on and an input is expanded),
  // defer the whole product: wrap both operands in a PROD DataLazy node.
  if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
  {
	DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
	return Data(c);
  }

  // Interpolate if necessary and find an appropriate function space
  // (whichever argument can be interpolated onto the other's space wins).
  Data arg_0_Z, arg_1_Z;
  if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
    if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
      arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
      arg_1_Z = Data(arg_1);
    }
    else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
      arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
      arg_0_Z =Data(arg_0);
    }
    else {
      throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
    }
  } else {
      arg_0_Z = Data(arg_0);
      arg_1_Z = Data(arg_1);
  }
  // Get rank and shape of inputs
  int rank0 = arg_0_Z.getDataPointRank();
  int rank1 = arg_1_Z.getDataPointRank();
  const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
  const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();

  // Prepare for the loops of the product and verify compatibility of shapes
  // start0/start1 are cyclic-rotation offsets used below to realise the
  // requested transpose without physically rearranging the data shapes.
  int start0=0, start1=0;
  if (transpose == 0)		{}
  else if (transpose == 1)	{ start0 = axis_offset; }
  else if (transpose == 2)	{ start1 = rank1-axis_offset; }
  else				{ throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }


  // Adjust the shapes for transpose
  DataTypes::ShapeType tmpShape0(rank0);	// pre-sizing the vectors rather
  DataTypes::ShapeType tmpShape1(rank1);	// than using push_back
  for (int i=0; i<rank0; i++)	{ tmpShape0[i]=shape0[(i+start0)%rank0]; }
  for (int i=0; i<rank1; i++)	{ tmpShape1[i]=shape1[(i+start1)%rank1]; }

#if 0
  // For debugging: show shape after transpose
  char tmp[100];
  std::string shapeStr;
  shapeStr = "(";
  for (int i=0; i<rank0; i++)	{ sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
  shapeStr = "(";
  for (int i=0; i<rank1; i++)	{ sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
#endif

  // Prepare for the loops of the product
  // SL = product of the leading (non-contracted) dims of arg_0,
  // SM = product of the contracted dims (must match between the inputs),
  // SR = product of the trailing (non-contracted) dims of arg_1.
  int SL=1, SM=1, SR=1;
  for (int i=0; i<rank0-axis_offset; i++)	{
    SL *= tmpShape0[i];
  }
  for (int i=rank0-axis_offset; i<rank0; i++)	{
    if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
      throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
    }
    SM *= tmpShape0[i];
  }
  for (int i=axis_offset; i<rank1; i++)		{
    SR *= tmpShape1[i];
  }

  // Define the shape of the output (rank of shape is the sum of the loop ranges below)
  DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
  {			// block to limit the scope of out_index
     int out_index=0;
     for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
     for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
  }

  if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
  {
     ostringstream os;
     os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
     throw DataException(os.str());
  }

  // Declare output Data object
  Data res;

  // --- Constant x Constant -> Constant: a single MVP on the one data point.
  if      (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());	// DataConstant output
    double *ptr_0 = &(arg_0_Z.getDataAtOffset(0));
    double *ptr_1 = &(arg_1_Z.getDataAtOffset(0));
    double *ptr_2 = &(res.getDataAtOffset(0));
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
  }
  // --- Constant x Tagged -> Tagged: one MVP for the default value, then one
  // per tag present in arg_1.
  else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {

    // Prepare the DataConstant input
    DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());	// DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_0 = tmp_0->getPointOffset(0,0);
    double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
    // Get the views
//     DataArrayView view_1 = tmp_1->getDefaultValue();
//     DataArrayView view_2 = tmp_2->getDefaultValue();
//     // Get the pointers to the actual data
//     double *ptr_1 = &((view_1.getData())[0]);
//     double *ptr_2 = &((view_2.getData())[0]);

    const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValue(0));


    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2->addTag(i->first);	// ensure the output has storage for this tag before writing to it
//       DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
//       DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
//       double *ptr_1 = &view_1.getData(0);
//       double *ptr_2 = &view_2.getData(0);

      const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  // --- Constant x Expanded -> Expanded: same constant left operand for every
  // data point of arg_1; parallelised over samples.
  else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_1,dataPointNo_1;
    int numSamples_1 = arg_1_Z.getNumSamples();
    int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
    int offset_0 = tmp_0->getPointOffset(0,0);
    #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
    for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
      for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
        int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
        int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  // --- Tagged x Constant -> Tagged: mirror of the Constant x Tagged case.
  else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }

    // Prepare the DataConstant input
    DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_0_Z.getFunctionSpace());	// DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_1 = tmp_1->getPointOffset(0,0);
    double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
    // Get the views
//     DataArrayView view_0 = tmp_0->getDefaultValue();
//     DataArrayView view_2 = tmp_2->getDefaultValue();
//     // Get the pointers to the actual data
//     double *ptr_0 = &((view_0.getData())[0]);
//     double *ptr_2 = &((view_2.getData())[0]);

    const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValue(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {
//      tmp_2->addTaggedValue(i->first,tmp_2->getDefaultValue());
//       DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
//       DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
//       double *ptr_0 = &view_0.getData(0);
//       double *ptr_2 = &view_2.getData(0);

      tmp_2->addTag(i->first);	// ensure the output has storage for this tag before writing to it
      const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));
      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  // --- Tagged x Tagged -> Tagged: the output carries the union of both
  // inputs' tags; looking up an absent tag falls back to that input's
  // default value via getDataByTagRO.
  else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
    res.tag();	// DataTagged output
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
    const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValue(0));


    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Merge the tags
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
    const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {
      tmp_2->addTag(i->first); // use tmp_2 to get correct shape
    }
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2->addTag(i->first);
    }
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
    for (i=lookup_2.begin();i!=lookup_2.end();i++) {
      const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
      const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTag(i->first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  // --- Tagged x Expanded -> Expanded: the tagged operand is constant within
  // a sample, so its pointer is hoisted out of the inner loop.
  else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
      double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  // --- Expanded x Constant -> Expanded: constant right operand, so its
  // offset is computed once before the parallel loop.
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    int offset_1 = tmp_1->getPointOffset(0,0);
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }


  }
  // --- Expanded x Tagged -> Expanded: mirror of Tagged x Expanded; the
  // tagged operand's pointer is hoisted per sample.
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
      double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  // --- Expanded x Expanded -> Expanded: one MVP per data point, fully
  // parallelised over samples.
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        double *ptr_0 = &(arg_0_Z.getDataAtOffset(offset_0));
        double *ptr_1 = &(arg_1_Z.getDataAtOffset(offset_1));
        double *ptr_2 = &(res.getDataAtOffset(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else {
    throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
  }

  return res;
}
2669
2670 DataAbstract*
2671 Data::borrowData() const
2672 {
2673 return m_data.get();
2674 }
2675
2676 // Not all that happy about returning a non-const from a const
2677 DataAbstract_ptr
2678 Data::borrowDataPtr() const
2679 {
2680 return m_data;
2681 }
2682
2683 // Not all that happy about returning a non-const from a const
2684 DataReady_ptr
2685 Data::borrowReadyPtr() const
2686 {
2687 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2688 EsysAssert((dr!=0), "Error - casting to DataReady.");
2689 return dr;
2690 }
2691
2692 std::string
2693 Data::toString() const
2694 {
2695 if (!m_data->isEmpty() &&
2696 !m_data->isLazy() &&
2697 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2698 {
2699 stringstream temp;
2700 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2701 return temp.str();
2702 }
2703 return m_data->toString();
2704 }
2705
2706
2707
2708 DataTypes::ValueType::const_reference
2709 Data::getDataAtOffset(DataTypes::ValueType::size_type i) const
2710 {
2711 if (isLazy())
2712 {
2713 throw DataException("Programmer error - getDataAtOffset not permitted on lazy data (object is const which prevents resolving).");
2714 }
2715 return getReady()->getDataAtOffset(i);
2716 }
2717
2718
2719 DataTypes::ValueType::reference
2720 Data::getDataAtOffset(DataTypes::ValueType::size_type i)
2721 {
2722 FORCERESOLVE;
2723 exclusiveWrite();
2724 return getReady()->getDataAtOffset(i);
2725 }
2726
2727 DataTypes::ValueType::const_reference
2728 Data::getDataPoint(int sampleNo, int dataPointNo) const
2729 {
2730 if (!isReady())
2731 {
2732 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data (object is const which prevents resolving).");
2733 }
2734 else
2735 {
2736 const DataReady* dr=getReady();
2737 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2738 }
2739 }
2740
2741
2742 DataTypes::ValueType::reference
2743 Data::getDataPoint(int sampleNo, int dataPointNo)
2744 {
2745 FORCERESOLVE;
2746 if (!isReady())
2747 {
2748 throw DataException("Programmer error - getDataPoint() not permitted on Lazy Data.");
2749 }
2750 else
2751 {
2752 exclusiveWrite();
2753 DataReady* dr=getReady();
2754 return dr->getDataAtOffset(dr->getPointOffset(sampleNo, dataPointNo));
2755 }
2756 }
2757
2758 DataTypes::ValueType*
2759 Data::allocSampleBuffer() const
2760 {
2761 if (isLazy())
2762 {
2763 return new DataTypes::ValueType(getSampleBufferSize());
2764 }
2765 else
2766 {
2767 return NULL;
2768 }
2769 }
2770
2771 void
2772 Data::freeSampleBuffer(DataTypes::ValueType* buffer)
2773 {
2774 if (buffer!=0)
2775 {
2776 delete buffer;
2777 }
2778 }
2779
2780
2781 /* Member functions specific to the MPI implementation */
2782
2783 void
2784 Data::print()
2785 {
2786 int i,j;
2787
2788 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2789 for( i=0; i<getNumSamples(); i++ )
2790 {
2791 printf( "[%6d]", i );
2792 for( j=0; j<getNumDataPointsPerSample(); j++ )
2793 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2794 printf( "\n" );
2795 }
2796 }
2797 void
2798 Data::dump(const std::string fileName) const
2799 {
2800 try
2801 {
2802 if (isLazy())
2803 {
2804 Data temp(*this); // this is to get a non-const object which we can resolve
2805 temp.resolve();
2806 temp.dump(fileName);
2807 }
2808 else
2809 {
2810 return m_data->dump(fileName);
2811 }
2812 }
2813 catch (std::exception& e)
2814 {
2815 cout << e.what() << endl;
2816 }
2817 }
2818
2819 int
2820 Data::get_MPISize() const
2821 {
2822 int size;
2823 #ifdef PASO_MPI
2824 int error;
2825 error = MPI_Comm_size( get_MPIComm(), &size );
2826 #else
2827 size = 1;
2828 #endif
2829 return size;
2830 }
2831
2832 int
2833 Data::get_MPIRank() const
2834 {
2835 int rank;
2836 #ifdef PASO_MPI
2837 int error;
2838 error = MPI_Comm_rank( get_MPIComm(), &rank );
2839 #else
2840 rank = 0;
2841 #endif
2842 return rank;
2843 }
2844
// Communicator used by this object: MPI_COMM_WORLD under MPI, otherwise -1
// (in non-MPI builds MPI_Comm is presumably an integer stand-in type —
// TODO confirm against the non-MPI headers).
MPI_Comm
Data::get_MPIComm() const
{
#ifdef PASO_MPI
	return MPI_COMM_WORLD;
#else
	return -1;
#endif
}
2854
2855

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26