/[escript]/trunk/escript/src/Data.cpp
ViewVC logotype

Contents of /trunk/escript/src/Data.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2282 - (show annotations)
Thu Feb 19 06:30:19 2009 UTC (10 years, 9 months ago) by jfenwick
File size: 81407 byte(s)
setTaggedValueByName now throws if the tag name does not exist.
The documentation says this is what happens - now the code matches the doco.
This resolves issue 238
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41 #include "WrappedArray.h"
42
43 using namespace std;
44 using namespace boost::python;
45 using namespace boost;
46 using namespace escript;
47
// ensure the current object is not a DataLazy
// The idea was that we could add an optional warning whenever a resolve is forced
// #define forceResolve() if (isLazy()) {#resolve();}

// True when the AUTOLAZY feature is enabled in the global escript parameters.
#define AUTOLAZYON escriptParams.getAUTOLAZY()

// If *this is lazy (or AUTOLAZY applies and *this is expanded), return a new lazy
// Data wrapping unary operation X instead of evaluating eagerly.
// NOTE: these macros contain 'return' statements - they exit the *calling* function.
#define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
	DataLazy* c=new DataLazy(borrowDataPtr(),X);\
	return Data(c);\
}

// As MAKELAZYOP, but for operations taking an extra parameter Y (e.g. a tolerance or offset).
#define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
	DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
	return Data(c);\
}

// Lazy binary operation X with right operand R, applied in place: *this is replaced
// by the lazy node and returned by reference.
#define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
	/* m_data=c->getPtr();*/ set_m_data(c->getPtr());\
	return (*this);\
}

// like the above but returns a new data rather than *this
#define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
	return Data(c);\
}

// Lazy binary operation on two explicit operands L and R (used where *this is not an operand).
#define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
{\
	DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
	return Data(c);\
}

// Do not use the following unless you want to make copies on assignment rather than
// share data. CopyOnWrite should make this unnecessary.
// #define ASSIGNMENT_MEANS_DEEPCOPY
87
namespace
{

// This should be safer once the DataC RO changes have been brought in
//
// Convert one data point (rank 0..4) into a nested boost::python tuple.
// 'shape' gives the extent of each dimension; 'v' is an array-like of the point's
// values, indexed through DataTypes::getRelIndex for ranks >= 2.
// Throws DataException for ranks above 4.
// NOTE(review): the nesting order (outer index = shape[0]) assumes getRelIndex's
// layout matches these loops - confirm against DataTypes before changing.
template <class ARR>
boost::python::tuple
pointToTuple( const DataTypes::ShapeType& shape,ARR v)
{
   using namespace boost::python;
   using boost::python::list;
   int rank=shape.size();
   if (rank==0)
   {
	// scalar point: a 1-tuple holding the single value
	return make_tuple(v[0]);
   }
   else if (rank==1)
   {
	list l;
	for (size_t i=0;i<shape[0];++i)
	{
	   l.append(v[i]);
	}
	return tuple(l);
   }
   else if (rank==2)
   {
	// build row tuples then wrap them in an outer tuple
	list lj;
	for (size_t j=0;j<shape[0];++j)
	{
	   list li;
	   for (size_t i=0;i<shape[1];++i)
	   {
	      li.append(v[DataTypes::getRelIndex(shape,j,i)]);
	   }
	   lj.append(tuple(li));
	}
	return tuple(lj);
   }
   else if (rank==3)
   {
	list lk;
	for (size_t k=0;k<shape[0];++k)
	{
	   list lj;
	   for (size_t j=0;j<shape[1];++j)
	   {
	      list li;
	      for (size_t i=0;i<shape[2];++i)
	      {
	         li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
	      }
	      lj.append(tuple(li));
	   }
	   lk.append(tuple(lj));
	}
	return tuple(lk);
   }
   else if (rank==4)
   {
	list ll;
	for (size_t l=0;l<shape[0];++l)
	{
	   list lk;
	   for (size_t k=0;k<shape[1];++k)
	   {
	      list lj;
	      for (size_t j=0;j<shape[2];++j)
	      {
	         list li;
	         for (size_t i=0;i<shape[3];++i)
	         {
	            li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
	         }
	         lj.append(tuple(li));
	      }
	      lk.append(tuple(lj));
	   }
	   ll.append(tuple(lk));
	}
	return tuple(ll);
   }
   else
     throw DataException("Unknown rank in pointToTuple.");
}

} // anonymous namespace
174
175 Data::Data()
176 : m_shared(false), m_lazy(false)
177 {
178 //
179 // Default data is type DataEmpty
180 DataAbstract* temp=new DataEmpty();
181 // m_data=temp->getPtr();
182 set_m_data(temp->getPtr());
183 m_protected=false;
184 }
185
186 Data::Data(double value,
187 const tuple& shape,
188 const FunctionSpace& what,
189 bool expanded)
190 : m_shared(false), m_lazy(false)
191 {
192 DataTypes::ShapeType dataPointShape;
193 for (int i = 0; i < shape.attr("__len__")(); ++i) {
194 dataPointShape.push_back(extract<const int>(shape[i]));
195 }
196
197 int len = DataTypes::noValues(dataPointShape);
198 DataVector temp_data(len,value,len);
199 initialise(temp_data, dataPointShape, what, expanded);
200 m_protected=false;
201 }
202
203 Data::Data(double value,
204 const DataTypes::ShapeType& dataPointShape,
205 const FunctionSpace& what,
206 bool expanded)
207 : m_shared(false), m_lazy(false)
208 {
209 int len = DataTypes::noValues(dataPointShape);
210
211 DataVector temp_data(len,value,len);
212 // DataArrayView temp_dataView(temp_data, dataPointShape);
213
214 // initialise(temp_dataView, what, expanded);
215 initialise(temp_data, dataPointShape, what, expanded);
216
217 m_protected=false;
218 }
219
220 Data::Data(const Data& inData)
221 : m_shared(false), m_lazy(false)
222 {
223 // m_data=inData.m_data;
224 set_m_data(inData.m_data);
225 m_protected=inData.isProtected();
226 }
227
228
229 Data::Data(const Data& inData,
230 const DataTypes::RegionType& region)
231 : m_shared(false), m_lazy(false)
232 {
233 DataAbstract_ptr dat=inData.m_data;
234 if (inData.isLazy())
235 {
236 dat=inData.m_data->resolve();
237 }
238 else
239 {
240 dat=inData.m_data;
241 }
242 //
243 // Create Data which is a slice of another Data
244 DataAbstract* tmp = dat->getSlice(region);
245 // m_data=DataAbstract_ptr(tmp);
246 set_m_data(DataAbstract_ptr(tmp));
247 m_protected=false;
248
249 }
250
// Construct a Data object by interpolating 'inData' onto 'functionspace'.
// If the FunctionSpaces already match, the representation is simply shared.
// Throws DataException for DataEmpty input and FunctionSpaceException when the
// requested interpolation is not permitted.
Data::Data(const Data& inData,
           const FunctionSpace& functionspace)
	: m_shared(false), m_lazy(false)
{
  if (inData.isEmpty())
  {
    throw DataException("Error - will not interpolate for instances of DataEmpty.");
  }
  if (inData.getFunctionSpace()==functionspace) {
    // same FunctionSpace: no interpolation needed, share the representation
    set_m_data(inData.m_data);
  }
  else
  {

    if (inData.isConstant()) {	// for a constant function, we just need to use the new function space
      if (!inData.probeInterpolation(functionspace))
      {           // Even though this is constant, we still need to check whether interpolation is allowed
	throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
      }
      // if the data is not lazy, this will just be a cast to DataReady
      DataReady_ptr dr=inData.m_data->resolve();
      DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
      set_m_data(DataAbstract_ptr(dc));
    } else {
      // non-constant: build an expanded zero target and ask the domain to fill it
      Data tmp(0,inData.getDataPointShape(),functionspace,true);
      // Note: Must use a reference or pointer to a derived object
      // in order to get polymorphic behaviour. Shouldn't really
      // be able to create an instance of AbstractDomain but that was done
      // as a boost:python work around which may no longer be required.
      /*const AbstractDomain& inDataDomain=inData.getDomain();*/
      const_Domain_ptr inDataDomain=inData.getDomain();
      if (inDataDomain==functionspace.getDomain()) {
	// same domain: plain interpolation
	inDataDomain->interpolateOnDomain(tmp,inData);
      } else {
	// different domains: cross-domain interpolation
	inDataDomain->interpolateACross(tmp,inData);
      }
      set_m_data(tmp.m_data);
    }
  }
  m_protected=false;
}
295
296 Data::Data(DataAbstract* underlyingdata)
297 : m_shared(false), m_lazy(false)
298 {
299 set_m_data(underlyingdata->getPtr());
300 m_protected=false;
301 }
302
303 Data::Data(DataAbstract_ptr underlyingdata)
304 : m_shared(false), m_lazy(false)
305 {
306 set_m_data(underlyingdata);
307 m_protected=false;
308 }
309
310 Data::Data(const DataTypes::ValueType& value,
311 const DataTypes::ShapeType& shape,
312 const FunctionSpace& what,
313 bool expanded)
314 : m_shared(false), m_lazy(false)
315 {
316 initialise(value,shape,what,expanded);
317 m_protected=false;
318 }
319
320
321 Data::Data(const object& value,
322 const FunctionSpace& what,
323 bool expanded)
324 : m_shared(false), m_lazy(false)
325 {
326 //numeric::array asNumArray(value);
327 WrappedArray w(value);
328 initialise(w,what,expanded);
329 m_protected=false;
330 }
331
332
// Construct a DataConstant from a python object, taking FunctionSpace and (for a
// scalar source) the data point shape from 'other'.
Data::Data(const object& value,
           const Data& other)
	: m_shared(false), m_lazy(false)
{
  WrappedArray w(value);

  // extract the shape of the numarray
  const DataTypes::ShapeType& tempShape=w.getShape();
  if (w.getRank()==0) {

    // scalar source: broadcast the single value across other's point shape

    // get the space for the data vector
    int len1 = DataTypes::noValues(tempShape);
    DataVector temp_data(len1, 0.0, len1);
    temp_data.copyFromArray(w,1);

    int len = DataTypes::noValues(other.getDataPointShape());

    // fill a vector of other's size with the scalar value
    DataVector temp2_data(len, temp_data[0], len);
    DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
    set_m_data(DataAbstract_ptr(t));

  } else {
    //
    // Create a DataConstant with the same sample shape as other
    DataConstant* t=new DataConstant(w,other.getFunctionSpace());
    set_m_data(DataAbstract_ptr(t));
  }
  m_protected=false;
}
365
// Destructor: passing a null pointer to set_m_data deregisters this object as an
// owner of the shared representation (see set_m_data for the asymmetry).
Data::~Data()
{
  set_m_data(DataAbstract_ptr());
}
370
371
// only call in thread safe contexts.
// This method should be atomic
//
// Replace the underlying representation with 'p', maintaining owner registration
// and the cached m_shared/m_lazy flags.
// NOTE: when p is null (destructor path) this object is removed as an owner but
// m_data itself is deliberately left untouched - the shared_ptr is released by
// Data's own destruction.
void Data::set_m_data(DataAbstract_ptr p)
{
  if (m_data.get()!=0)	// release old ownership
  {
	m_data->removeOwner(this);
  }
  if (p.get()!=0)
  {
	m_data=p;
	m_data->addOwner(this);
	// cache flags so isLazy()/isShared() stay answerable mid-update
	m_shared=m_data->isShared();
	m_lazy=m_data->isLazy();
  }
}
388
389 void Data::initialise(const WrappedArray& value,
390 const FunctionSpace& what,
391 bool expanded)
392 {
393 //
394 // Construct a Data object of the appropriate type.
395 // Construct the object first as there seems to be a bug which causes
396 // undefined behaviour if an exception is thrown during construction
397 // within the shared_ptr constructor.
398 if (expanded) {
399 DataAbstract* temp=new DataExpanded(value, what);
400 // m_data=temp->getPtr();
401 set_m_data(temp->getPtr());
402 } else {
403 DataAbstract* temp=new DataConstant(value, what);
404 // m_data=temp->getPtr();
405 set_m_data(temp->getPtr());
406 }
407 }
408
409
410 void
411 Data::initialise(const DataTypes::ValueType& value,
412 const DataTypes::ShapeType& shape,
413 const FunctionSpace& what,
414 bool expanded)
415 {
416 //
417 // Construct a Data object of the appropriate type.
418 // Construct the object first as there seems to be a bug which causes
419 // undefined behaviour if an exception is thrown during construction
420 // within the shared_ptr constructor.
421 if (expanded) {
422 DataAbstract* temp=new DataExpanded(what, shape, value);
423 // m_data=temp->getPtr();
424 set_m_data(temp->getPtr());
425 } else {
426 DataAbstract* temp=new DataConstant(what, shape, value);
427 // m_data=temp->getPtr();
428 set_m_data(temp->getPtr());
429 }
430 }
431
432
433 escriptDataC
434 Data::getDataC()
435 {
436 escriptDataC temp;
437 temp.m_dataPtr=(void*)this;
438 return temp;
439 }
440
441 escriptDataC
442 Data::getDataC() const
443 {
444 escriptDataC temp;
445 temp.m_dataPtr=(void*)this;
446 return temp;
447 }
448
449 size_t
450 Data::getSampleBufferSize() const
451 {
452 return m_data->getSampleBufferSize();
453 }
454
455
456 const boost::python::tuple
457 Data::getShapeTuple() const
458 {
459 const DataTypes::ShapeType& shape=getDataPointShape();
460 switch(getDataPointRank()) {
461 case 0:
462 return make_tuple();
463 case 1:
464 return make_tuple(long_(shape[0]));
465 case 2:
466 return make_tuple(long_(shape[0]),long_(shape[1]));
467 case 3:
468 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
469 case 4:
470 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
471 default:
472 throw DataException("Error - illegal Data rank.");
473 }
474 }
475
476
477 // The different name is needed because boost has trouble with overloaded functions.
478 // It can't work out what type the function is based soley on its name.
479 // There are ways to fix this involving creating function pointer variables for each form
480 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
481 Data
482 Data::copySelf()
483 {
484 DataAbstract* temp=m_data->deepCopy();
485 return Data(temp);
486 }
487
488 void
489 Data::copy(const Data& other)
490 {
491 DataAbstract* temp=other.m_data->deepCopy();
492 DataAbstract_ptr p=temp->getPtr();
493 // m_data=p;
494 set_m_data(p);
495 }
496
497
498 Data
499 Data::delay()
500 {
501 DataLazy* dl=new DataLazy(m_data);
502 return Data(dl);
503 }
504
505 void
506 Data::delaySelf()
507 {
508 if (!isLazy())
509 {
510 // m_data=(new DataLazy(m_data))->getPtr();
511 set_m_data((new DataLazy(m_data))->getPtr());
512 }
513 }
514
515
516 // For lazy data, it would seem that DataTagged will need to be treated differently since even after setting all tags
517 // to zero, all the tags from all the DataTags would be in the result.
518 // However since they all have the same value (0) whether they are there or not should not matter.
519 // So I have decided that for all types this method will create a constant 0.
520 // It can be promoted up as required.
521 // A possible efficiency concern might be expanded->constant->expanded which has an extra memory management
522 // but we can deal with that if it arises.
523 //
524 void
525 Data::setToZero()
526 {
527 if (isEmpty())
528 {
529 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
530 }
531 if (isLazy())
532 {
533 DataTypes::ValueType v(getNoValues(),0);
534 DataConstant* dc=new DataConstant(getFunctionSpace(),getDataPointShape(),v);
535 DataLazy* dl=new DataLazy(dc->getPtr());
536 set_m_data(dl->getPtr());
537 }
538 else
539 {
540 exclusiveWrite();
541 m_data->setToZero();
542 }
543 }
544
545
// Copy values from 'other' into *this at every component where the corresponding
// mask value is > 0. Handles interpolation onto this object's FunctionSpace,
// promotion to a common representation, tagged data with differing tag sets, and
// scalar broadcasts. Throws DataException on incompatible inputs.
void
Data::copyWithMask(const Data& other,
                   const Data& mask)
{
 // 1. Interpolate if required so all Datas use the same FS as this
 // 2. Tag or Expand so that all Data's are the same type
 // 3. Iterate over the data vectors copying values where mask is >0
 if (other.isEmpty() || mask.isEmpty())
 {
	throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
 }
 // Work on local copies so the callers' objects are not resolved/retyped.
 Data other2(other);
 Data mask2(mask);
 other2.resolve();
 mask2.resolve();
 this->resolve();
 FunctionSpace myFS=getFunctionSpace();
 FunctionSpace oFS=other2.getFunctionSpace();
 FunctionSpace mFS=mask2.getFunctionSpace();
 // Bring 'other' onto this object's FunctionSpace, or fail.
 if (oFS!=myFS)
 {
   if (other2.probeInterpolation(myFS))
   {
     other2=other2.interpolate(myFS);
   }
   else
   {
     throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
   }
 }
 // Bring the mask onto this object's FunctionSpace, or fail.
 if (mFS!=myFS)
 {
   if (mask2.probeInterpolation(myFS))
   {
     mask2=mask2.interpolate(myFS);
   }
   else
   {
     throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
   }
 }
 // Ensure that all args have the same type
 // (promote everything to the widest representation present: expanded > tagged > constant)
 if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
 {
	this->expand();
	other2.expand();
	mask2.expand();
 }
 else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
 {
	this->tag();
	other2.tag();
	mask2.tag();
 }
 else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
 {
	// all three already constant - nothing to promote
 }
 else
 {
	throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
 }
 unsigned int selfrank=getDataPointRank();
 unsigned int otherrank=other2.getDataPointRank();
 unsigned int maskrank=mask2.getDataPointRank();
 if ((selfrank==0) && (otherrank>0 || maskrank>0))
 {
	// to get here we must be copying from a large object into a scalar
	// I am not allowing this.
	// If you are calling copyWithMask then you are considering keeping some existing values
	// and so I'm going to assume that you don't want your data objects getting a new shape.
	throw DataException("Attempt to copyWithMask into a scalar from an object or mask with rank>0.");
 }
 // Detach from any shared representation before writing.
 exclusiveWrite();
 // Now we iterate over the elements
 DataVector& self=getReady()->getVectorRW();;
 const DataVector& ovec=other2.getReadyPtr()->getVectorRO();
 const DataVector& mvec=mask2.getReadyPtr()->getVectorRO();

 if ((selfrank>0) && (otherrank==0) &&(maskrank==0))
 {
	// Not allowing this combination.
	// it is not clear what the rank of the target should be.
	// Should it be filled with the scalar (rank stays the same);
	// or should the target object be reshaped to be a scalar as well.
	throw DataException("Attempt to copyWithMask from scalar mask and data into non-scalar target.");
 }
 if ((selfrank>0) && (otherrank>0) &&(maskrank==0))
 {
	if (mvec[0]>0)		// copy whole object if scalar is >0
	{
	    copy(other);
	}
	return;
 }
 if (isTagged())		// so all objects involved will also be tagged
 {
	// note the !
	if (!((getDataPointShape()==mask2.getDataPointShape()) &&
		((other2.getDataPointShape()==mask2.getDataPointShape()) || (otherrank==0))))
	{
		throw DataException("copyWithMask, shape mismatch.");
	}

	// We need to consider the possibility that tags are missing or in the wrong order
	// My guiding assumption here is: All tagged Datas are assumed to have the default value for
	// all tags which are not explicitly defined

	const DataTagged* mptr=dynamic_cast<const DataTagged*>(mask2.m_data.get());
	const DataTagged* optr=dynamic_cast<const DataTagged*>(other2.m_data.get());
	DataTagged* tptr=dynamic_cast<DataTagged*>(m_data.get());

	// first, add any tags missing from other or mask
	const DataTagged::DataMapType& olookup=optr->getTagLookup();
	const DataTagged::DataMapType& mlookup=mptr->getTagLookup();
	const DataTagged::DataMapType& tlookup=tptr->getTagLookup();
	DataTagged::DataMapType::const_iterator i;	// i->first is a tag, i->second is an offset into memory
	for (i=olookup.begin();i!=olookup.end();i++)
	{
	   tptr->addTag(i->first);
	}
	for (i=mlookup.begin();i!=mlookup.end();i++) {
	   tptr->addTag(i->first);
	}
	// now we know that *this has all the required tags but they aren't guaranteed to be in
	// the same order

	// There are two possibilities: 1. all objects have the same rank. 2. other is a scalar
	if ((selfrank==otherrank) && (otherrank==maskrank))
	{
	   // matching ranks: component-wise copy within each tag's block
	   for (i=tlookup.begin();i!=tlookup.end();i++)
	   {
	      // get the target offset
	      DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
	      for (int j=0;j<getDataPointSize();++j)
	      {
	         if (mvec[j+moff]>0)
	         {
	            self[j+toff]=ovec[j+ooff];
	         }
	      }
	   }
	   // now for the default value
	   for (int j=0;j<getDataPointSize();++j)
	   {
	      if (mvec[j+mptr->getDefaultOffset()]>0)
	      {
	         self[j+tptr->getDefaultOffset()]=ovec[j+optr->getDefaultOffset()];
	      }
	   }
	}
	else	// other is a scalar
	{
	   // scalar 'other': the single source value is broadcast to every unmasked
	   // component of each target tag block
	   for (i=tlookup.begin();i!=tlookup.end();i++)
	   {
	      // get the target offset
	      DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
	      for (int j=0;j<getDataPointSize();++j)
	      {
	         if (mvec[j+moff]>0)
	         {
	            self[j+toff]=ovec[ooff];
	         }
	      }
	   }
	   // now for the default value
	   for (int j=0;j<getDataPointSize();++j)
	   {
	      if (mvec[j+mptr->getDefaultOffset()]>0)
	      {
	         self[j+tptr->getDefaultOffset()]=ovec[0];
	      }
	   }
	}

	return;	// ugly
 }
 // mixed scalar and non-scalar operation
 if ((selfrank>0) && (otherrank==0) && (mask2.getDataPointShape()==getDataPointShape()))
 {
	size_t num_points=self.size();
	// OPENMP 3.0 allows unsigned loop vars.
	#if defined(_OPENMP) && (_OPENMP < 200805)
	long i;
	#else
	size_t i;
	#endif
	size_t psize=getDataPointSize();
	#pragma omp parallel for private(i) schedule(static)
	for (i=0;i<num_points;++i)
	{
	   if (mvec[i]>0)
	   {
	      self[i]=ovec[i/psize];	// since this is expanded there is one scalar
	   }				// dest point
	}
	return;			// ugly!
 }
 // tagged data is already taken care of so we only need to worry about shapes
 // special cases with scalars are already dealt with so all we need to worry about is shape
 if ((getDataPointShape()!=other2.getDataPointShape()) || getDataPointShape()!=mask2.getDataPointShape())
 {
	ostringstream oss;
	oss <<"Error - size mismatch in arguments to copyWithMask.";
	oss << "\nself_shape=" << DataTypes::shapeToString(getDataPointShape());
	oss << " other2_shape=" << DataTypes::shapeToString(other2.getDataPointShape());
	oss << " mask2_shape=" << DataTypes::shapeToString(mask2.getDataPointShape());
	throw DataException(oss.str());
 }
 size_t num_points=self.size();

 // OPENMP 3.0 allows unsigned loop vars.
 #if defined(_OPENMP) && (_OPENMP < 200805)
 long i;
 #else
 size_t i;
 #endif
 #pragma omp parallel for private(i) schedule(static)
 for (i=0;i<num_points;++i)
 {
	if (mvec[i]>0)
	{
	   self[i]=ovec[i];
	}
 }
}
775
776 bool
777 Data::isExpanded() const
778 {
779 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
780 return (temp!=0);
781 }
782
// True when the representation behaves like expanded data (e.g. a lazy expression
// whose result would be expanded), as reported by the underlying DataAbstract.
bool
Data::actsExpanded() const
{
  return m_data->actsExpanded();

}
789
790
791 bool
792 Data::isTagged() const
793 {
794 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
795 return (temp!=0);
796 }
797
798 bool
799 Data::isEmpty() const
800 {
801 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
802 return (temp!=0);
803 }
804
805 bool
806 Data::isConstant() const
807 {
808 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
809 return (temp!=0);
810 }
811
// True when the underlying representation is a DataLazy expression.
// Answered from the cached flag, not from m_data - see set_m_data.
bool
Data::isLazy() const
{
   return m_lazy;	// not asking m_data because we need to be able to ask this while m_data is changing
}
817
// at the moment this is synonymous with !isLazy() but that could change
//
// True when the representation derives from DataReady (i.e. values are concrete).
bool
Data::isReady() const
{
   return (dynamic_cast<DataReady*>(m_data.get())!=0);
}
824
825
// Mark this object read-only; subsequent mutating calls will throw.
// There is deliberately no way to clear the flag once set.
void
Data::setProtection()
{
   m_protected=true;
}
831
// True when setProtection() has been called (or the flag was copied from a source).
bool
Data::isProtected() const
{
   return m_protected;
}
837
838
839
// Promote the representation to DataExpanded in place.
// Constant and tagged data are converted; expanded data is untouched; lazy data is
// resolved first and then expanded recursively; DataEmpty (or anything else) throws.
void
Data::expand()
{
  if (isConstant()) {
    DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataConst);
    set_m_data(temp->getPtr());
  } else if (isTagged()) {
    DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
    DataAbstract* temp=new DataExpanded(*tempDataTag);
    set_m_data(temp->getPtr());
  } else if (isExpanded()) {
    //
    // do nothing
  } else if (isEmpty()) {
    throw DataException("Error - Expansion of DataEmpty not possible.");
  } else if (isLazy()) {
    resolve();
    expand();	// resolve might not give us expanded data
  } else {
    throw DataException("Error - Expansion not implemented for this Data type.");
  }
}
865
// Promote the representation to DataTagged in place.
// Constant data is converted; tagged data is untouched; expanded and empty data
// throw; lazy data is resolved first (rejected if it would resolve to expanded).
void
Data::tag()
{
  if (isConstant()) {
    DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
    DataAbstract* temp=new DataTagged(*tempDataConst);
    set_m_data(temp->getPtr());
  } else if (isTagged()) {
    // do nothing
  } else if (isExpanded()) {
    throw DataException("Error - Creating tag data from DataExpanded not possible.");
  } else if (isEmpty()) {
    throw DataException("Error - Creating tag data from DataEmpty not possible.");
  } else if (isLazy()) {
     // NOTE(review): the expanded-ness test below queries the lazy m_data *before*
     // the resolved result 'res' is installed - confirm DataLazy::isExpanded
     // reports the type the expression resolves to.
     DataAbstract_ptr res=m_data->resolve();
     if (m_data->isExpanded())
     {
	throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
     }
     set_m_data(res);
     tag();
  } else {
    throw DataException("Error - Tagging not implemented for this Data type.");
  }
}
893
894 void
895 Data::resolve()
896 {
897 if (isLazy())
898 {
899 // m_data=m_data->resolve();
900 set_m_data(m_data->resolve());
901 }
902 }
903
// Prepare this object for in-place modification: resolve any lazy expression
// first, then detach from any shared representation. Order matters - writing
// requires concrete, exclusively owned data.
void
Data::requireWrite()
{
  resolve();
  exclusiveWrite();
}
910
// Elementwise reciprocal (1/x).
// MAKELAZYOP may return a lazy RECIP node before the eager path runs.
Data
Data::oneOver() const
{
  MAKELAZYOP(RECIP)
  return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
}
917
// Elementwise predicate (x > 0).
// MAKELAZYOP may return a lazy GZ node before the eager path runs.
Data
Data::wherePositive() const
{
  MAKELAZYOP(GZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
}
924
// Elementwise predicate (x < 0).
// MAKELAZYOP may return a lazy LZ node before the eager path runs.
Data
Data::whereNegative() const
{
  MAKELAZYOP(LZ)
  return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
}
931
// Elementwise predicate (x >= 0).
// MAKELAZYOP may return a lazy GEZ node before the eager path runs.
Data
Data::whereNonNegative() const
{
  MAKELAZYOP(GEZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
}
938
// Elementwise predicate (x <= 0).
// MAKELAZYOP may return a lazy LEZ node before the eager path runs.
Data
Data::whereNonPositive() const
{
  MAKELAZYOP(LEZ)
  return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
}
945
// Elementwise predicate (|x| <= tol).
// MAKELAZYOPOFF may return a lazy EZ node (carrying tol) before the eager path runs.
Data
Data::whereZero(double tol) const
{
//   Data dataAbs=abs();
//   return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
   MAKELAZYOPOFF(EZ,tol)
   return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));

}
955
// Elementwise predicate (|x| > tol).
// MAKELAZYOPOFF may return a lazy NEZ node (carrying tol) before the eager path runs.
Data
Data::whereNonZero(double tol) const
{
//   Data dataAbs=abs();
//   return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
   MAKELAZYOPOFF(NEZ,tol)
   return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));

}
965
966 Data
967 Data::interpolate(const FunctionSpace& functionspace) const
968 {
969 return Data(*this,functionspace);
970 }
971
972 bool
973 Data::probeInterpolation(const FunctionSpace& functionspace) const
974 {
975 return getFunctionSpace().probeInterpolation(functionspace);
976 }
977
// Compute the spatial gradient of this Data, evaluated on 'functionspace'.
// The result shape is this object's point shape with the spatial dimension
// appended; the domain performs the actual computation via setToGradient.
// Throws DataException for DataEmpty input or mismatched domains.
Data
Data::gradOn(const FunctionSpace& functionspace) const
{
  if (isEmpty())
  {
	throw DataException("Error - operation not permitted on instances of DataEmpty.");
  }
  // timing instrumentation (see esysUtils/blocktimer.h)
  double blocktimer_start = blocktimer_time();
  if (functionspace.getDomain()!=getDomain())
    throw DataException("Error - gradient cannot be calculated on different domains.");
  // result shape = point shape + [spatial dimension]
  DataTypes::ShapeType grad_shape=getDataPointShape();
  grad_shape.push_back(functionspace.getDim());
  Data out(0.0,grad_shape,functionspace,true);
  getDomain()->setToGradient(out,*this);
  blocktimer_increment("grad()", blocktimer_start);
  return out;
}
995
996 Data
997 Data::grad() const
998 {
999 if (isEmpty())
1000 {
1001 throw DataException("Error - operation not permitted on instances of DataEmpty.");
1002 }
1003 return gradOn(escript::function(*getDomain()));
1004 }
1005
1006 int
1007 Data::getDataPointSize() const
1008 {
1009 return m_data->getNoValues();
1010 }
1011
1012 DataTypes::ValueType::size_type
1013 Data::getLength() const
1014 {
1015 return m_data->getLength();
1016 }
1017
1018 const
1019 boost::python::numeric :: array
1020 Data:: getValueOfDataPoint(int dataPointNo)
1021 {
1022 return boost::python::numeric::array(getValueOfDataPointAsTuple(dataPointNo));
1023 }
1024
// Return one data point as a (nested) python tuple.
// 'dataPointNo' is a flat index; it is split into sample number and point-within-
// sample using the points-per-sample count. Lazy data is resolved first.
// Throws DataException for invalid indices or zero points per sample.
const boost::python::object
Data::getValueOfDataPointAsTuple(int dataPointNo)
{
   forceResolve();
   if (getNumDataPointsPerSample()>0) {
       // split flat index into (sampleNo, dataPointNoInSample)
       int sampleNo = dataPointNo/getNumDataPointsPerSample();
       int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
       //
       // Check a valid sample number has been supplied
       if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
           throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
       }

       //
       // Check a valid data point number has been supplied
       if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
           throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
       }
       // TODO: global error handling
       DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
       return pointToTuple(getDataPointShape(),&(getDataAtOffsetRO(offset)));
   }
   else
   {
	// The old method would return an empty numArray of the given shape
	// I'm going to throw an exception because if we have zero points per sample we have problems
	throw DataException("Error - need at least 1 datapoint per sample.");
   }

}
1055
1056
1057 void
1058 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
1059 {
1060 // this will throw if the value cannot be represented
1061 setValueOfDataPointToArray(dataPointNo,py_object);
1062 }
1063
1064 void
1065 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
1066 {
1067 if (isProtected()) {
1068 throw DataException("Error - attempt to update protected Data object.");
1069 }
1070 forceResolve();
1071
1072 WrappedArray w(obj);
1073 //
1074 // check rank
1075 if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
1076 throw DataException("Rank of numarray does not match Data object rank");
1077
1078 //
1079 // check shape of num_array
1080 for (unsigned int i=0; i<getDataPointRank(); i++) {
1081 if (w.getShape()[i]!=getDataPointShape()[i])
1082 throw DataException("Shape of numarray does not match Data object rank");
1083 }
1084 //
1085 // make sure data is expanded:
1086 //
1087 if (!isExpanded()) {
1088 expand();
1089 }
1090 if (getNumDataPointsPerSample()>0) {
1091 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1092 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1093 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
1094 } else {
1095 m_data->copyToDataPoint(-1, 0,w);
1096 }
1097 }
1098
1099 void
1100 Data::setValueOfDataPoint(int dataPointNo, const double value)
1101 {
1102 if (isProtected()) {
1103 throw DataException("Error - attempt to update protected Data object.");
1104 }
1105 //
1106 // make sure data is expanded:
1107 forceResolve();
1108 if (!isExpanded()) {
1109 expand();
1110 }
1111 if (getNumDataPointsPerSample()>0) {
1112 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1113 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1114 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
1115 } else {
1116 m_data->copyToDataPoint(-1, 0,value);
1117 }
1118 }
1119
// Fetch one data point (identified by owning rank and point number) and
// return it as a numeric array; the tuple variant does the real work,
// including the MPI broadcast.
const
boost::python::numeric::array
Data::getValueOfGlobalDataPoint(int procNo, int dataPointNo)
{
  return boost::python::numeric::array(getValueOfGlobalDataPointAsTuple(procNo, dataPointNo));
}
1126
1127 const
1128 boost::python::object
1129 Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
1130 {
1131 // This could be lazier than it is now
1132 forceResolve();
1133
1134 // copy datapoint into a buffer
1135 // broadcast buffer to all nodes
1136 // convert buffer to tuple
1137 // return tuple
1138
1139 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
1140 size_t length=DataTypes::noValues(dataPointShape);
1141
1142 // added for the MPI communication
1143 double *tmpData = new double[length];
1144
1145 // updated for the MPI case
1146 if( get_MPIRank()==procNo ){
1147 if (getNumDataPointsPerSample()>0) {
1148 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1149 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1150 //
1151 // Check a valid sample number has been supplied
1152 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1153 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1154 }
1155
1156 //
1157 // Check a valid data point number has been supplied
1158 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1159 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1160 }
1161 // TODO: global error handling
1162 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1163
1164 memcpy(tmpData,&(getDataAtOffsetRO(offset)),length*sizeof(double));
1165 }
1166 }
1167 #ifdef PASO_MPI
1168 // broadcast the data to all other processes
1169 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1170 #endif
1171
1172 boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
1173 delete [] tmpData;
1174 //
1175 // return the loaded array
1176 return t;
1177
1178 }
1179
1180
// ---------------------------------------------------------------------------
// Integration front-ends. The *_const variants refuse lazy data (a const
// object cannot be expanded); the non-const variants expand first.
// integrateWorker() does the actual work.
// ---------------------------------------------------------------------------

// Integrate and return the result as a python tuple (const variant).
boost::python::object
Data::integrateToTuple_const() const
{
  if (isLazy())
  {
        throw DataException("Error - cannot integrate for constant lazy data.");
  }
  return integrateWorker();
}

// Integrate and return the result as a python tuple, expanding lazy data first.
boost::python::object
Data::integrateToTuple()
{
  if (isLazy())
  {
        expand();
  }
  return integrateWorker();
}

// Integrate and return the result as a numeric array (const variant).
boost::python::object
Data::integrate_const() const
{
  if (isLazy())
  {
        throw DataException("Error - cannot integrate for constant lazy data.");
  }
  return boost::python::numeric::array(integrateWorker());
}

// Integrate and return the result as a numeric array, expanding lazy data first.
boost::python::object
Data::integrate()
{
  if (isLazy())
  {
        expand();
  }
  return boost::python::numeric::array(integrateWorker());
}
1222
1223
1224
// Shared implementation for the integrate*() entry points.
// Integrates each component of the data over the domain (delegated to the
// domain's setToIntegrals) and returns the result shaped like one data point.
// Under MPI the per-rank partial integrals are summed with MPI_Allreduce.
boost::python::object
Data::integrateWorker() const
{
  DataTypes::ShapeType shape = getDataPointShape();
  int dataPointSize = getDataPointSize();

  //
  // calculate the integral values
  vector<double> integrals(dataPointSize);
  vector<double> integrals_local(dataPointSize);
  // only continuous domains know how to integrate
  const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
  if (dom==0)
  {
    throw DataException("Can not integrate over non-continuous domains.");
  }
#ifdef PASO_MPI
  dom->setToIntegrals(integrals_local,*this);
  // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
  double *tmp = new double[dataPointSize];
  double *tmp_local = new double[dataPointSize];
  for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
  MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
  for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
  tuple result=pointToTuple(shape,tmp);
  delete[] tmp;
  delete[] tmp_local;
#else
  dom->setToIntegrals(integrals,*this);
/*  double *tmp = new double[dataPointSize];
  for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; }*/
  tuple result=pointToTuple(shape,integrals);
//   delete tmp;
#endif

  return result;
}
1262
// ---------------------------------------------------------------------------
// Pointwise unary operations (trigonometric / hyperbolic / error function).
// MAKELAZYOP(X) returns early with a lazy node wrapping operation X when this
// object is lazy (or AUTOLAZY applies to expanded data); otherwise the
// operation is applied eagerly via C_TensorUnaryOperation.
// ---------------------------------------------------------------------------
Data
Data::sin() const
{
  MAKELAZYOP(SIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

Data
Data::cos() const
{
  MAKELAZYOP(COS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

Data
Data::tan() const
{
  MAKELAZYOP(TAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

Data
Data::asin() const
{
  MAKELAZYOP(ASIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

Data
Data::acos() const
{
  MAKELAZYOP(ACOS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}


Data
Data::atan() const
{
  MAKELAZYOP(ATAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

Data
Data::sinh() const
{
  MAKELAZYOP(SINH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

Data
Data::cosh() const
{
  MAKELAZYOP(COSH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

Data
Data::tanh() const
{
  MAKELAZYOP(TANH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}


Data
Data::erf() const
{
// ::erf is not available in the MSVC maths library, so the operation is
// rejected there (the Intel compiler on Windows does provide it)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
  MAKELAZYOP(ERF)
  return C_TensorUnaryOperation(*this, ::erf);
#endif
}

// The inverse hyperbolic functions are also missing from the MSVC maths
// library; escript provides substitute implementations for that platform.
Data
Data::asinh() const
{
  MAKELAZYOP(ASINH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::asinh);
#endif
}

Data
Data::acosh() const
{
  MAKELAZYOP(ACOSH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::acosh);
#endif
}

Data
Data::atanh() const
{
  MAKELAZYOP(ATANH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::atanh);
#endif
}
1371
// ---------------------------------------------------------------------------
// Pointwise unary operations (logs, sign, abs, negate, copy, exp, sqrt).
// Same lazy/eager dispatch pattern as the trigonometric functions above.
// ---------------------------------------------------------------------------
Data
Data::log10() const
{
  MAKELAZYOP(LOG10)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}

Data
Data::log() const
{
  MAKELAZYOP(LOG)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}

Data
Data::sign() const
{
  // escript::fsign maps each value to -1/0/+1
  MAKELAZYOP(SIGN)
  return C_TensorUnaryOperation(*this, escript::fsign);
}

Data
Data::abs() const
{
  MAKELAZYOP(ABS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}

Data
Data::neg() const
{
  MAKELAZYOP(NEG)
  return C_TensorUnaryOperation(*this, negate<double>());
}

// Identity operation: returns a deep copy of this object.
Data
Data::pos() const
{
        // not doing lazy check here is deliberate.
        // since a deep copy of lazy data should be cheap, I'll just let it happen now
        Data result;
        // perform a deep copy
        result.copy(*this);
        return result;
}

Data
Data::exp() const
{
  MAKELAZYOP(EXP)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}

Data
Data::sqrt() const
{
  MAKELAZYOP(SQRT)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}
1431
// ---------------------------------------------------------------------------
// Scalar reductions (Lsup/sup/inf) over all data points.
// The *_const variants throw on lazy data (a const object cannot be
// resolved); the non-const variants resolve first. The reductions themselves
// live in LsupWorker/supWorker/infWorker.
// ---------------------------------------------------------------------------
double
Data::Lsup_const() const
{
   if (isLazy())
   {
        throw DataException("Error - cannot compute Lsup for constant lazy data.");
   }
   return LsupWorker();
}

double
Data::Lsup()
{
   if (isLazy())
   {
        resolve();
   }
   return LsupWorker();
}

double
Data::sup_const() const
{
   if (isLazy())
   {
        throw DataException("Error - cannot compute sup for constant lazy data.");
   }
   return supWorker();
}

double
Data::sup()
{
   if (isLazy())
   {
        resolve();
   }
   return supWorker();
}

double
Data::inf_const() const
{
   if (isLazy())
   {
        throw DataException("Error - cannot compute inf for constant lazy data.");
   }
   return infWorker();
}

double
Data::inf()
{
   if (isLazy())
   {
        resolve();
   }
   return infWorker();
}
1491
// Compute the local Lsup (maximum absolute value); under MPI the per-rank
// results are combined with an MPI_MAX allreduce so every rank gets the
// global value.
double
Data::LsupWorker() const
{
  double localValue;
  //
  // set the initial absolute maximum value to zero

  AbsMax abs_max_func;
  localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

// Compute the local maximum value; combined across ranks with MPI_MAX.
double
Data::supWorker() const
{
  double localValue;
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

// Compute the local minimum value; combined across ranks with MPI_MIN.
double
Data::infWorker() const
{
  double localValue;
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1543
1544 /* TODO */
1545 /* global reduction */
1546 Data
1547 Data::maxval() const
1548 {
1549 if (isLazy())
1550 {
1551 Data temp(*this); // to get around the fact that you can't resolve a const Data
1552 temp.resolve();
1553 return temp.maxval();
1554 }
1555 //
1556 // set the initial maximum value to min possible double
1557 FMax fmax_func;
1558 return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1559 }
1560
1561 Data
1562 Data::minval() const
1563 {
1564 if (isLazy())
1565 {
1566 Data temp(*this); // to get around the fact that you can't resolve a const Data
1567 temp.resolve();
1568 return temp.minval();
1569 }
1570 //
1571 // set the initial minimum value to max possible double
1572 FMin fmin_func;
1573 return dp_algorithm(fmin_func,numeric_limits<double>::max());
1574 }
1575
1576 Data
1577 Data::swapaxes(const int axis0, const int axis1) const
1578 {
1579 if (isLazy())
1580 {
1581 Data temp(*this);
1582 temp.resolve();
1583 return temp.swapaxes(axis0,axis1);
1584 }
1585 int axis0_tmp,axis1_tmp;
1586 DataTypes::ShapeType s=getDataPointShape();
1587 DataTypes::ShapeType ev_shape;
1588 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1589 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1590 int rank=getDataPointRank();
1591 if (rank<2) {
1592 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1593 }
1594 if (axis0<0 || axis0>rank-1) {
1595 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1596 }
1597 if (axis1<0 || axis1>rank-1) {
1598 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1599 }
1600 if (axis0 == axis1) {
1601 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1602 }
1603 if (axis0 > axis1) {
1604 axis0_tmp=axis1;
1605 axis1_tmp=axis0;
1606 } else {
1607 axis0_tmp=axis0;
1608 axis1_tmp=axis1;
1609 }
1610 for (int i=0; i<rank; i++) {
1611 if (i == axis0_tmp) {
1612 ev_shape.push_back(s[axis1_tmp]);
1613 } else if (i == axis1_tmp) {
1614 ev_shape.push_back(s[axis0_tmp]);
1615 } else {
1616 ev_shape.push_back(s[i]);
1617 }
1618 }
1619 Data ev(0.,ev_shape,getFunctionSpace());
1620 ev.typeMatchRight(*this);
1621 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1622 return ev;
1623
1624 }
1625
1626 Data
1627 Data::symmetric() const
1628 {
1629 // check input
1630 DataTypes::ShapeType s=getDataPointShape();
1631 if (getDataPointRank()==2) {
1632 if(s[0] != s[1])
1633 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1634 }
1635 else if (getDataPointRank()==4) {
1636 if(!(s[0] == s[2] && s[1] == s[3]))
1637 throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1638 }
1639 else {
1640 throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1641 }
1642 MAKELAZYOP(SYM)
1643 Data ev(0.,getDataPointShape(),getFunctionSpace());
1644 ev.typeMatchRight(*this);
1645 m_data->symmetric(ev.m_data.get());
1646 return ev;
1647 }
1648
// Return the antisymmetric (nonsymmetric) part of this object.
// Valid input is a rank-2 square data point or a rank-4 data point with
// dim0==dim2 and dim1==dim3; the result has the same shape as the input.
Data
Data::nonsymmetric() const
{
     MAKELAZYOP(NSYM)
     // check input
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()==2) {
        if(s[0] != s[1])
           throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
     }
     else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
           throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        ev_shape.push_back(s[2]);
        ev_shape.push_back(s[3]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
     }
     else {
        throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
     }
}
1683
1684 Data
1685 Data::trace(int axis_offset) const
1686 {
1687 MAKELAZYOPOFF(TRACE,axis_offset)
1688 if ((axis_offset<0) || (axis_offset>getDataPointRank()))
1689 {
1690 throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
1691 }
1692 DataTypes::ShapeType s=getDataPointShape();
1693 if (getDataPointRank()==2) {
1694 DataTypes::ShapeType ev_shape;
1695 Data ev(0.,ev_shape,getFunctionSpace());
1696 ev.typeMatchRight(*this);
1697 m_data->trace(ev.m_data.get(), axis_offset);
1698 return ev;
1699 }
1700 if (getDataPointRank()==3) {
1701 DataTypes::ShapeType ev_shape;
1702 if (axis_offset==0) {
1703 int s2=s[2];
1704 ev_shape.push_back(s2);
1705 }
1706 else if (axis_offset==1) {
1707 int s0=s[0];
1708 ev_shape.push_back(s0);
1709 }
1710 Data ev(0.,ev_shape,getFunctionSpace());
1711 ev.typeMatchRight(*this);
1712 m_data->trace(ev.m_data.get(), axis_offset);
1713 return ev;
1714 }
1715 if (getDataPointRank()==4) {
1716 DataTypes::ShapeType ev_shape;
1717 if (axis_offset==0) {
1718 ev_shape.push_back(s[2]);
1719 ev_shape.push_back(s[3]);
1720 }
1721 else if (axis_offset==1) {
1722 ev_shape.push_back(s[0]);
1723 ev_shape.push_back(s[3]);
1724 }
1725 else if (axis_offset==2) {
1726 ev_shape.push_back(s[0]);
1727 ev_shape.push_back(s[1]);
1728 }
1729 Data ev(0.,ev_shape,getFunctionSpace());
1730 ev.typeMatchRight(*this);
1731 m_data->trace(ev.m_data.get(), axis_offset);
1732 return ev;
1733 }
1734 else {
1735 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1736 }
1737 }
1738
1739 Data
1740 Data::transpose(int axis_offset) const
1741 {
1742 MAKELAZYOPOFF(TRANS,axis_offset)
1743 DataTypes::ShapeType s=getDataPointShape();
1744 DataTypes::ShapeType ev_shape;
1745 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1746 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1747 int rank=getDataPointRank();
1748 if (axis_offset<0 || axis_offset>rank) {
1749 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1750 }
1751 for (int i=0; i<rank; i++) {
1752 int index = (axis_offset+i)%rank;
1753 ev_shape.push_back(s[index]); // Append to new shape
1754 }
1755 Data ev(0.,ev_shape,getFunctionSpace());
1756 ev.typeMatchRight(*this);
1757 m_data->transpose(ev.m_data.get(), axis_offset);
1758 return ev;
1759 }
1760
1761 Data
1762 Data::eigenvalues() const
1763 {
1764 if (isLazy())
1765 {
1766 Data temp(*this); // to get around the fact that you can't resolve a const Data
1767 temp.resolve();
1768 return temp.eigenvalues();
1769 }
1770 // check input
1771 DataTypes::ShapeType s=getDataPointShape();
1772 if (getDataPointRank()!=2)
1773 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1774 if(s[0] != s[1])
1775 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1776 // create return
1777 DataTypes::ShapeType ev_shape(1,s[0]);
1778 Data ev(0.,ev_shape,getFunctionSpace());
1779 ev.typeMatchRight(*this);
1780 m_data->eigenvalues(ev.m_data.get());
1781 return ev;
1782 }
1783
// Compute eigenvalues and eigenvectors of each (square, rank-2) data point.
// @param tol tolerance passed to the underlying eigen solver.
// @return python tuple (eigenvalues, eigenvectors): the first entry has one
//         value per matrix row, the second is a square matrix of vectors.
const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
     if (isLazy())
     {
        Data temp(*this);       // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.eigenvalues_and_eigenvectors(tol);
     }
     DataTypes::ShapeType s=getDataPointShape();
     if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
     if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
     // create return
     DataTypes::ShapeType ev_shape(1,s[0]);
     Data ev(0.,ev_shape,getFunctionSpace());
     ev.typeMatchRight(*this);
     DataTypes::ShapeType V_shape(2,s[0]);
     Data V(0.,V_shape,getFunctionSpace());
     V.typeMatchRight(*this);
     m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
     return make_tuple(boost::python::object(ev),boost::python::object(V));
}
1808
1809 const boost::python::tuple
1810 Data::minGlobalDataPoint() const
1811 {
1812 // NB: calc_minGlobalDataPoint( had to be split off from minGlobalDataPoint( as boost::make_tuple causes an
1813 // abort (for unknown reasons) if there are openmp directives with it in the
1814 // surrounding function
1815
1816 int DataPointNo;
1817 int ProcNo;
1818 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1819 return make_tuple(ProcNo,DataPointNo);
1820 }
1821
1822 void
1823 Data::calc_minGlobalDataPoint(int& ProcNo,
1824 int& DataPointNo) const
1825 {
1826 if (isLazy())
1827 {
1828 Data temp(*this); // to get around the fact that you can't resolve a const Data
1829 temp.resolve();
1830 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1831 }
1832 int i,j;
1833 int lowi=0,lowj=0;
1834 double min=numeric_limits<double>::max();
1835
1836 Data temp=minval();
1837
1838 int numSamples=temp.getNumSamples();
1839 int numDPPSample=temp.getNumDataPointsPerSample();
1840
1841 double next,local_min;
1842 int local_lowi=0,local_lowj=0;
1843
1844 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1845 {
1846 local_min=min;
1847 #pragma omp for private(i,j) schedule(static)
1848 for (i=0; i<numSamples; i++) {
1849 for (j=0; j<numDPPSample; j++) {
1850 next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
1851 if (next<local_min) {
1852 local_min=next;
1853 local_lowi=i;
1854 local_lowj=j;
1855 }
1856 }
1857 }
1858 #pragma omp critical
1859 if (local_min<min) {
1860 min=local_min;
1861 lowi=local_lowi;
1862 lowj=local_lowj;
1863 }
1864 }
1865
1866 #ifdef PASO_MPI
1867 // determine the processor on which the minimum occurs
1868 next = temp.getDataPointRO(lowi,lowj);
1869 int lowProc = 0;
1870 double *globalMins = new double[get_MPISize()+1];
1871 int error;
1872 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1873
1874 if( get_MPIRank()==0 ){
1875 next = globalMins[lowProc];
1876 for( i=1; i<get_MPISize(); i++ )
1877 if( next>globalMins[i] ){
1878 lowProc = i;
1879 next = globalMins[i];
1880 }
1881 }
1882 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1883
1884 delete [] globalMins;
1885 ProcNo = lowProc;
1886 #else
1887 ProcNo = 0;
1888 #endif
1889 DataPointNo = lowj + lowi * numDPPSample;
1890 }
1891
1892 void
1893 Data::saveDX(std::string fileName) const
1894 {
1895 if (isEmpty())
1896 {
1897 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1898 }
1899 if (isLazy())
1900 {
1901 Data temp(*this); // to get around the fact that you can't resolve a const Data
1902 temp.resolve();
1903 temp.saveDX(fileName);
1904 return;
1905 }
1906 boost::python::dict args;
1907 args["data"]=boost::python::object(this);
1908 getDomain()->saveDX(fileName,args);
1909 return;
1910 }
1911
1912 void
1913 Data::saveVTK(std::string fileName) const
1914 {
1915 if (isEmpty())
1916 {
1917 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1918 }
1919 if (isLazy())
1920 {
1921 Data temp(*this); // to get around the fact that you can't resolve a const Data
1922 temp.resolve();
1923 temp.saveVTK(fileName);
1924 return;
1925 }
1926 boost::python::dict args;
1927 args["data"]=boost::python::object(this);
1928 getDomain()->saveVTK(fileName,args);
1929 return;
1930 }
1931
// ---------------------------------------------------------------------------
// Compound-assignment operators, assignment and pow.
// MAKELAZYBINSELF returns early with a lazy node when either operand is lazy
// (for lazy data op= is implemented as the corresponding binary op); python
// operands are first wrapped in a Data object on this object's function
// space. All updates are refused on protected objects.
// ---------------------------------------------------------------------------
Data&
Data::operator+=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,ADD)    // for lazy + is equivalent to +=
  exclusiveWrite();                     // Since Lazy data does not modify its leaves we only need to worry here
  binaryOp(right,plus<double>());
  return (*this);
}

Data&
Data::operator+=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)+=tmp;
  return *this;
}

// Hmmm, operator= makes a deep copy but the copy constructor does not?
Data&
Data::operator=(const Data& other)
{
#if defined ASSIGNMENT_MEANS_DEEPCOPY
// This should not be used.
// Just leaving this here until I have completed transition
  copy(other);
#else
  m_protected=false;            // since any changes should be caught by exclusiveWrite();
//   m_data=other.m_data;
  set_m_data(other.m_data);
#endif
  return (*this);
}

Data&
Data::operator-=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,SUB)
  exclusiveWrite();
  binaryOp(right,minus<double>());
  return (*this);
}

Data&
Data::operator-=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)-=tmp;
  return (*this);
}

Data&
Data::operator*=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,MUL)
  exclusiveWrite();
  binaryOp(right,multiplies<double>());
  return (*this);
}

Data&
Data::operator*=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)*=tmp;
  return (*this);
}

Data&
Data::operator/=(const Data& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,DIV)
  exclusiveWrite();
  binaryOp(right,divides<double>());
  return (*this);
}

Data&
Data::operator/=(const boost::python::object& right)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)/=tmp;
  return (*this);
}

// reflected pow: computes left ** (*this) for a python left operand
Data
Data::rpowO(const boost::python::object& left) const
{
  Data left_d(left,*this);
  return left_d.powD(*this);
}

// pow with a python right operand
Data
Data::powO(const boost::python::object& right) const
{
  Data tmp(right,getFunctionSpace(),false);
  return powD(tmp);
}

// pointwise power; deferred when either operand is lazy
Data
Data::powD(const Data& right) const
{
  MAKELAZYBIN(right,POW)
  return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
}
2060
// ---------------------------------------------------------------------------
// Free binary operators (Data op Data, Data op python object, and the
// reflected forms). NOTE: It is essential to specify the namespace these
// operators belong to (escript). Python operands are first wrapped in a Data
// object on the other operand's function space; MAKELAZYBIN2 returns a lazy
// node when either operand is lazy.
// ---------------------------------------------------------------------------
Data
escript::operator+(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,ADD)
  return C_TensorBinaryOperation(left, right, plus<double>());
}

Data
escript::operator-(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,SUB)
  return C_TensorBinaryOperation(left, right, minus<double>());
}

Data
escript::operator*(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,MUL)
  return C_TensorBinaryOperation(left, right, multiplies<double>());
}

Data
escript::operator/(const Data& left, const Data& right)
{
  MAKELAZYBIN2(left,right,DIV)
  return C_TensorBinaryOperation(left, right, divides<double>());
}

Data
escript::operator+(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,ADD)
  return left+tmp;
}

Data
escript::operator-(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,SUB)
  return left-tmp;
}

Data
escript::operator*(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,MUL)
  return left*tmp;
}

Data
escript::operator/(const Data& left, const boost::python::object& right)
{
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,DIV)
  return left/tmp;
}

Data
escript::operator+(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,ADD)
  return tmp+right;
}

Data
escript::operator-(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,SUB)
  return tmp-right;
}

Data
escript::operator*(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,MUL)
  return tmp*right;
}

Data
escript::operator/(const boost::python::object& left, const Data& right)
{
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,DIV)
  return tmp/right;
}
2176
2177
2178 /* TODO */
2179 /* global reduction */
2180 Data
2181 Data::getItem(const boost::python::object& key) const
2182 {
2183
2184 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2185
2186 if (slice_region.size()!=getDataPointRank()) {
2187 throw DataException("Error - slice size does not match Data rank.");
2188 }
2189
2190 return getSlice(slice_region);
2191 }
2192
2193 /* TODO */
2194 /* global reduction */
// Return a copy of the given region of this object.
// (The slicing Data constructor does the actual work.)
Data
Data::getSlice(const DataTypes::RegionType& region) const
{
  return Data(*this,region);
}
2200
2201 /* TODO */
2202 /* global reduction */
2203 void
2204 Data::setItemO(const boost::python::object& key,
2205 const boost::python::object& value)
2206 {
2207 Data tempData(value,getFunctionSpace());
2208 setItemD(key,tempData);
2209 }
2210
2211 void
2212 Data::setItemD(const boost::python::object& key,
2213 const Data& value)
2214 {
2215 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2216 if (slice_region.size()!=getDataPointRank()) {
2217 throw DataException("Error - slice size does not match Data rank.");
2218 }
2219 exclusiveWrite();
2220 if (getFunctionSpace()!=value.getFunctionSpace()) {
2221 setSlice(Data(value,getFunctionSpace()),slice_region);
2222 } else {
2223 setSlice(value,slice_region);
2224 }
2225 }
2226
// Assign `value` into `region` of this object.
// Resolves laziness, detaches shared storage, and promotes both operands to a
// common representation before delegating to the underlying DataReady object.
void
Data::setSlice(const Data& value,
               const DataTypes::RegionType& region)
{
  if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
  }
  forceResolve();
  exclusiveWrite();             // In case someone finds a way to call this without going through setItemD
  Data tempValue(value);
  // promote both operands to matching representations (expanded/tagged/...)
  typeMatchLeft(tempValue);
  typeMatchRight(tempValue);
  getReady()->setSlice(tempValue.m_data.get(),region);
}
2241
2242 void
2243 Data::typeMatchLeft(Data& right) const
2244 {
2245 if (right.isLazy() && !isLazy())
2246 {
2247 right.resolve();
2248 }
2249 if (isExpanded()){
2250 right.expand();
2251 } else if (isTagged()) {
2252 if (right.isConstant()) {
2253 right.tag();
2254 }
2255 }
2256 }
2257
2258 void
2259 Data::typeMatchRight(const Data& right)
2260 {
2261 if (isLazy() && !right.isLazy())
2262 {
2263 resolve();
2264 }
2265 if (isTagged()) {
2266 if (right.isExpanded()) {
2267 expand();
2268 }
2269 } else if (isConstant()) {
2270 if (right.isExpanded()) {
2271 expand();
2272 } else if (right.isTagged()) {
2273 tag();
2274 }
2275 }
2276 }
2277
2278 // The normal TaggedValue adds the tag if it is not already present
2279 // This form does not. It throws instead.
2280 // This is because the names are maintained by the domain and cannot be added
2281 // without knowing the tag number to map it to.
2282 void
2283 Data::setTaggedValueByName(std::string name,
2284 const boost::python::object& value)
2285 {
2286 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2287 forceResolve();
2288 exclusiveWrite();
2289 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2290 setTaggedValue(tagKey,value);
2291 }
2292 else
2293 { // The
2294 throw DataException("Error - unknown tag in setTaggedValueByName.");
2295 }
2296 }
2297
2298 void
2299 Data::setTaggedValue(int tagKey,
2300 const boost::python::object& value)
2301 {
2302 if (isProtected()) {
2303 throw DataException("Error - attempt to update protected Data object.");
2304 }
2305 //
2306 // Ensure underlying data object is of type DataTagged
2307 forceResolve();
2308 exclusiveWrite();
2309 if (isConstant()) tag();
2310 WrappedArray w(value);
2311
2312 DataVector temp_data2;
2313 temp_data2.copyFromArray(w,1);
2314
2315 m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
2316 }
2317
2318
2319 void
2320 Data::setTaggedValueFromCPP(int tagKey,
2321 const DataTypes::ShapeType& pointshape,
2322 const DataTypes::ValueType& value,
2323 int dataOffset)
2324 {
2325 if (isProtected()) {
2326 throw DataException("Error - attempt to update protected Data object.");
2327 }
2328 //
2329 // Ensure underlying data object is of type DataTagged
2330 forceResolve();
2331 if (isConstant()) tag();
2332 exclusiveWrite();
2333 //
2334 // Call DataAbstract::setTaggedValue
2335 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2336 }
2337
2338 int
2339 Data::getTagNumber(int dpno)
2340 {
2341 if (isEmpty())
2342 {
2343 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2344 }
2345 return getFunctionSpace().getTagFromDataPointNo(dpno);
2346 }
2347
2348
2349 ostream& escript::operator<<(ostream& o, const Data& data)
2350 {
2351 o << data.toString();
2352 return o;
2353 }
2354
// General tensor product of two Data objects:
//   res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
// where SM is the product of the last axis_offset entries of arg_0's shape.
// transpose selects which argument has its axes rotated before the product
// (0 = neither, 1 = arg_0, 2 = arg_1).
// The body is a nine-way dispatch over the storage types (constant / tagged /
// expanded) of the two arguments; each branch does the same matrix-matrix
// products over the appropriate data points.
Data
escript::C_GeneralTensorProduct(Data& arg_0,
                     Data& arg_1,
                     int axis_offset,
                     int transpose)
{
  // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
  // SM is the product of the last axis_offset entries in arg_0.getShape().

  // deal with any lazy data
  // if (arg_0.isLazy()) {arg_0.resolve();}
  // if (arg_1.isLazy()) {arg_1.resolve();}
  // If either input is lazy (or AUTOLAZY applies to an expanded input),
  // build a deferred PROD node instead of computing the product now.
  if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
  {
	DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
	return Data(c);
  }

  // Interpolate if necessary and find an appropriate function space
  Data arg_0_Z, arg_1_Z;
  if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
    if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
      arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
      arg_1_Z = Data(arg_1);
    }
    else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
      arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
      arg_0_Z =Data(arg_0);
    }
    else {
      throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
    }
  } else {
      arg_0_Z = Data(arg_0);
      arg_1_Z = Data(arg_1);
  }
  // Get rank and shape of inputs
  int rank0 = arg_0_Z.getDataPointRank();
  int rank1 = arg_1_Z.getDataPointRank();
  const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
  const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();

  // Prepare for the loops of the product and verify compatibility of shapes
  // start0/start1 are rotation offsets used to realise the transpose option.
  int start0=0, start1=0;
  if (transpose == 0)		{}
  else if (transpose == 1)	{ start0 = axis_offset; }
  else if (transpose == 2)	{ start1 = rank1-axis_offset; }
  else				{ throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }


  // Adjust the shapes for transpose
  DataTypes::ShapeType tmpShape0(rank0);	// pre-sizing the vectors rather
  DataTypes::ShapeType tmpShape1(rank1);	// than using push_back
  for (int i=0; i<rank0; i++)	{ tmpShape0[i]=shape0[(i+start0)%rank0]; }
  for (int i=0; i<rank1; i++)	{ tmpShape1[i]=shape1[(i+start1)%rank1]; }

#if 0
  // For debugging: show shape after transpose
  char tmp[100];
  std::string shapeStr;
  shapeStr = "(";
  for (int i=0; i<rank0; i++)	{ sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
  shapeStr = "(";
  for (int i=0; i<rank1; i++)	{ sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
#endif

  // Prepare for the loops of the product
  // SL = product of the leading (non-contracted) axes of arg_0,
  // SM = product of the contracted axes (must match between the arguments),
  // SR = product of the trailing (non-contracted) axes of arg_1.
  int SL=1, SM=1, SR=1;
  for (int i=0; i<rank0-axis_offset; i++)	{
    SL *= tmpShape0[i];
  }
  for (int i=rank0-axis_offset; i<rank0; i++)	{
    if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
      throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
    }
    SM *= tmpShape0[i];
  }
  for (int i=axis_offset; i<rank1; i++)		{
    SR *= tmpShape1[i];
  }

  // Define the shape of the output (rank of shape is the sum of the loop ranges below)
  DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
  {			// block to limit the scope of out_index
     int out_index=0;
     for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
     for (int i=axis_offset; i<rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
  }

  if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
  {
     ostringstream os;
     os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
     throw DataException(os.str());
  }

  // Declare output Data object
  Data res;

  // Dispatch on the storage types of the two (now ready) arguments.
  if      (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());	// DataConstant output
    const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
    const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
    double *ptr_2 = &(res.getDataAtOffsetRW(0));
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
  }
  else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {

    // Prepare the DataConstant input
    DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());	// DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_0 = tmp_0->getPointOffset(0,0);
    const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));

    const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValueRW(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2->addTag(i->first);

      const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_1,dataPointNo_1;
    int numSamples_1 = arg_1_Z.getNumSamples();
    int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
    int offset_0 = tmp_0->getPointOffset(0,0);
    // One product per data point of the expanded argument; samples are independent.
    #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
    for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
      for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
        int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
        int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
        const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
        const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }

    // Prepare the DataConstant input
    DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_0_Z.getFunctionSpace());	// DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_1 = tmp_1->getPointOffset(0,0);
    const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
    const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValueRW(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {

      tmp_2->addTag(i->first);
      const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
    res.tag();	// DataTagged output
    DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
    const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
    double *ptr_2 = &(tmp_2->getDefaultValueRW(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Merge the tags
    // (the output must carry the union of both inputs' tags; absent tags fall
    //  back to the respective default value in the loop below)
    DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
    const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
    const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {
      tmp_2->addTag(i->first); // use tmp_2 to get correct shape
    }
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2->addTag(i->first);
    }
    // Compute an MVP for each tag
    const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
    for (i=lookup_2.begin();i!=lookup_2.end();i++) {
      const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
      const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
      double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
      const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    int offset_1 = tmp_1->getPointOffset(0,0);
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
        const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }


  }
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
      const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
        double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
        const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else {
    throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
  }

  return res;
}
2722
2723 DataAbstract*
2724 Data::borrowData() const
2725 {
2726 return m_data.get();
2727 }
2728
2729 // Not all that happy about returning a non-const from a const
2730 DataAbstract_ptr
2731 Data::borrowDataPtr() const
2732 {
2733 return m_data;
2734 }
2735
2736 // Not all that happy about returning a non-const from a const
2737 DataReady_ptr
2738 Data::borrowReadyPtr() const
2739 {
2740 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2741 EsysAssert((dr!=0), "Error - casting to DataReady.");
2742 return dr;
2743 }
2744
2745 std::string
2746 Data::toString() const
2747 {
2748 if (!m_data->isEmpty() &&
2749 !m_data->isLazy() &&
2750 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2751 {
2752 stringstream temp;
2753 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2754 return temp.str();
2755 }
2756 return m_data->toString();
2757 }
2758
2759
2760 // This method is not thread-safe
2761 DataTypes::ValueType::reference
2762 Data::getDataAtOffsetRW(DataTypes::ValueType::size_type i)
2763 {
2764 checkExclusiveWrite();
2765 return getReady()->getDataAtOffsetRW(i);
2766 }
2767
2768 // This method is not thread-safe
2769 DataTypes::ValueType::const_reference
2770 Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i)
2771 {
2772 forceResolve();
2773 return getReady()->getDataAtOffsetRO(i);
2774 }
2775
2776
2777 // DataTypes::ValueType::const_reference
2778 // Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i) const
2779 // {
2780 // if (isLazy())
2781 // {
2782 // throw DataException("Programmer error - getDataAtOffsetRO() not permitted on Lazy Data (object is const which prevents resolving).");
2783 // }
2784 // return getReady()->getDataAtOffsetRO(i);
2785 // }
2786
2787
2788 DataTypes::ValueType::const_reference
2789 Data::getDataPointRO(int sampleNo, int dataPointNo)
2790 {
2791 forceResolve();
2792 if (!isReady())
2793 {
2794 throw DataException("Programmer error -getDataPointRO() not permitted on Lazy Data.");
2795 }
2796 else
2797 {
2798 const DataReady* dr=getReady();
2799 return dr->getDataAtOffsetRO(dr->getPointOffset(sampleNo, dataPointNo));
2800 }
2801 }
2802
2803
2804
2805
2806 DataTypes::ValueType::reference
2807 Data::getDataPointRW(int sampleNo, int dataPointNo)
2808 {
2809 checkExclusiveWrite();
2810 DataReady* dr=getReady();
2811 return dr->getDataAtOffsetRW(dr->getPointOffset(sampleNo, dataPointNo));
2812 }
2813
2814 BufferGroup*
2815 Data::allocSampleBuffer() const
2816 {
2817 if (isLazy())
2818 {
2819 #ifdef _OPENMP
2820 int tnum=omp_get_max_threads();
2821 #else
2822 int tnum=1;
2823 #endif
2824 return new BufferGroup(getSampleBufferSize(),tnum);
2825 }
2826 else
2827 {
2828 return NULL;
2829 }
2830 }
2831
2832 void
2833 Data::freeSampleBuffer(BufferGroup* bufferg)
2834 {
2835 if (bufferg!=0)
2836 {
2837 delete bufferg;
2838 }
2839 }
2840
2841
2842 /* Member functions specific to the MPI implementation */
2843
2844 void
2845 Data::print()
2846 {
2847 int i,j;
2848
2849 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2850 for( i=0; i<getNumSamples(); i++ )
2851 {
2852 printf( "[%6d]", i );
2853 for( j=0; j<getNumDataPointsPerSample(); j++ )
2854 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2855 printf( "\n" );
2856 }
2857 }
2858 void
2859 Data::dump(const std::string fileName) const
2860 {
2861 try
2862 {
2863 if (isLazy())
2864 {
2865 Data temp(*this); // this is to get a non-const object which we can resolve
2866 temp.resolve();
2867 temp.dump(fileName);
2868 }
2869 else
2870 {
2871 return m_data->dump(fileName);
2872 }
2873 }
2874 catch (std::exception& e)
2875 {
2876 cout << e.what() << endl;
2877 }
2878 }
2879
2880 int
2881 Data::get_MPISize() const
2882 {
2883 int size;
2884 #ifdef PASO_MPI
2885 int error;
2886 error = MPI_Comm_size( get_MPIComm(), &size );
2887 #else
2888 size = 1;
2889 #endif
2890 return size;
2891 }
2892
2893 int
2894 Data::get_MPIRank() const
2895 {
2896 int rank;
2897 #ifdef PASO_MPI
2898 int error;
2899 error = MPI_Comm_rank( get_MPIComm(), &rank );
2900 #else
2901 rank = 0;
2902 #endif
2903 return rank;
2904 }
2905
2906 MPI_Comm
2907 Data::get_MPIComm() const
2908 {
2909 #ifdef PASO_MPI
2910 return MPI_COMM_WORLD;
2911 #else
2912 return -1;
2913 #endif
2914 }
2915
2916

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26