/[escript]/branches/numpy/escript/src/Data.cpp

Revision 2453 - Tue Jun 2 22:54:20 2009 UTC by jfenwick
File size: 80688 byte(s)
Removing NONUMARRAY defines

/*******************************************************
*
* Copyright (c) 2003-2008 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/


#include "Data.h"

#include "DataExpanded.h"
#include "DataConstant.h"
#include "DataTagged.h"
#include "DataEmpty.h"
#include "DataLazy.h"
#include "FunctionSpaceFactory.h"
#include "AbstractContinuousDomain.h"
#include "UnaryFuncs.h"
#include "FunctionSpaceException.h"
#include "EscriptParams.h"

extern "C" {
#include "esysUtils/blocktimer.h"
}

#include <fstream>
#include <algorithm>
#include <vector>
#include <functional>
#include <sstream>  // so we can throw messages about ranks

#include <boost/python/dict.hpp>
#include <boost/python/extract.hpp>
#include <boost/python/long.hpp>
#include "WrappedArray.h"

using namespace std;
using namespace boost::python;
using namespace boost;
using namespace escript;

// ensure the current object is not a DataLazy
// The idea was that we could add an optional warning whenever a resolve is forced
// #define forceResolve() if (isLazy()) {#resolve();}

#define AUTOLAZYON escriptParams.getAUTOLAZY()
#define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
    DataLazy* c=new DataLazy(borrowDataPtr(),X);\
    return Data(c);\
}
#define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
    DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
    return Data(c);\
}

#define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
    DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
    /* m_data=c->getPtr();*/ set_m_data(c->getPtr());\
    return (*this);\
}

// like the above but returns a new Data rather than *this
#define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
    DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
    return Data(c);\
}

#define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
{\
    DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
    return Data(c);\
}
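
// Illustrative note (a sketch of how these macros are used, not extra build
// machinery): Data::powD() below begins with MAKELAZYBIN(right,POW). If either
// operand is lazy, or AUTOLAZY is enabled and an operand is expanded, the macro
// returns a deferred DataLazy node wrapping both operands; otherwise control
// falls through to the eager C_TensorBinaryOperation call.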

// Do not use the following unless you want to make copies on assignment rather than
// share data. CopyOnWrite should make this unnecessary.
// #define ASSIGNMENT_MEANS_DEEPCOPY

namespace
{

// This should be safer once the DataC RO changes have been brought in
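// For example (assuming the usual row-major layout used by getRelIndex),
// shape (2,3) with v = {1,2,3,4,5,6} produces the nested tuple
// ((1,2,3),(4,5,6)).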
template <class ARR>
boost::python::tuple
pointToTuple( const DataTypes::ShapeType& shape,ARR v)
{
    using namespace boost::python;
    using boost::python::list;
    int rank=shape.size();
    if (rank==0)
    {
        return make_tuple(v[0]);
    }
    else if (rank==1)
    {
        list l;
        for (size_t i=0;i<shape[0];++i)
        {
            l.append(v[i]);
        }
        return tuple(l);
    }
    else if (rank==2)
    {
        list lj;
        for (size_t j=0;j<shape[0];++j)
        {
            list li;
            for (size_t i=0;i<shape[1];++i)
            {
                li.append(v[DataTypes::getRelIndex(shape,j,i)]);
            }
            lj.append(tuple(li));
        }
        return tuple(lj);
    }
    else if (rank==3)
    {
        list lk;
        for (size_t k=0;k<shape[0];++k)
        {
            list lj;
            for (size_t j=0;j<shape[1];++j)
            {
                list li;
                for (size_t i=0;i<shape[2];++i)
                {
                    li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
                }
                lj.append(tuple(li));
            }
            lk.append(tuple(lj));
        }
        return tuple(lk);
    }
    else if (rank==4)
    {
        list ll;
        for (size_t l=0;l<shape[0];++l)
        {
            list lk;
            for (size_t k=0;k<shape[1];++k)
            {
                list lj;
                for (size_t j=0;j<shape[2];++j)
                {
                    list li;
                    for (size_t i=0;i<shape[3];++i)
                    {
                        li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
                    }
                    lj.append(tuple(li));
                }
                lk.append(tuple(lj));
            }
            ll.append(tuple(lk));
        }
        return tuple(ll);
    }
    else
        throw DataException("Unknown rank in pointToTuple.");
}

} // anonymous namespace

Data::Data()
    : m_shared(false), m_lazy(false)
{
    //
    // Default data is type DataEmpty
    DataAbstract* temp=new DataEmpty();
    // m_data=temp->getPtr();
    set_m_data(temp->getPtr());
    m_protected=false;
}

Data::Data(double value,
           const tuple& shape,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    DataTypes::ShapeType dataPointShape;
    for (int i = 0; i < shape.attr("__len__")(); ++i) {
        dataPointShape.push_back(extract<const int>(shape[i]));
    }

    int len = DataTypes::noValues(dataPointShape);
    DataVector temp_data(len,value,len);
    initialise(temp_data, dataPointShape, what, expanded);
    m_protected=false;
}

Data::Data(double value,
           const DataTypes::ShapeType& dataPointShape,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    int len = DataTypes::noValues(dataPointShape);

    DataVector temp_data(len,value,len);
    // DataArrayView temp_dataView(temp_data, dataPointShape);

    // initialise(temp_dataView, what, expanded);
    initialise(temp_data, dataPointShape, what, expanded);

    m_protected=false;
}

Data::Data(const Data& inData)
    : m_shared(false), m_lazy(false)
{
    // m_data=inData.m_data;
    set_m_data(inData.m_data);
    m_protected=inData.isProtected();
}


Data::Data(const Data& inData,
           const DataTypes::RegionType& region)
    : m_shared(false), m_lazy(false)
{
    DataAbstract_ptr dat=inData.m_data;
    if (inData.isLazy())
    {
        dat=inData.m_data->resolve();
    }
    else
    {
        dat=inData.m_data;
    }
    //
    // Create Data which is a slice of another Data
    DataAbstract* tmp = dat->getSlice(region);
    // m_data=DataAbstract_ptr(tmp);
    set_m_data(DataAbstract_ptr(tmp));
    m_protected=false;
}

Data::Data(const Data& inData,
           const FunctionSpace& functionspace)
    : m_shared(false), m_lazy(false)
{
    if (inData.isEmpty())
    {
        throw DataException("Error - will not interpolate for instances of DataEmpty.");
    }
    if (inData.getFunctionSpace()==functionspace) {
        // m_data=inData.m_data;
        set_m_data(inData.m_data);
    }
    else
    {
        if (inData.isConstant()) {  // for a constant function, we just need to use the new function space
            if (!inData.probeInterpolation(functionspace))
            {  // Even though this is constant, we still need to check whether interpolation is allowed
                throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
            }
            // if the data is not lazy, this will just be a cast to DataReady
            DataReady_ptr dr=inData.m_data->resolve();
            DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
            // m_data=DataAbstract_ptr(dc);
            set_m_data(DataAbstract_ptr(dc));
        } else {
            Data tmp(0,inData.getDataPointShape(),functionspace,true);
            // Note: Must use a reference or pointer to a derived object
            // in order to get polymorphic behaviour. Shouldn't really
            // be able to create an instance of AbstractDomain but that was done
            // as a boost:python work around which may no longer be required.
            /*const AbstractDomain& inDataDomain=inData.getDomain();*/
            const_Domain_ptr inDataDomain=inData.getDomain();
            if (inDataDomain==functionspace.getDomain()) {
                inDataDomain->interpolateOnDomain(tmp,inData);
            } else {
                inDataDomain->interpolateACross(tmp,inData);
            }
            // m_data=tmp.m_data;
            set_m_data(tmp.m_data);
        }
    }
    m_protected=false;
}

Data::Data(DataAbstract* underlyingdata)
    : m_shared(false), m_lazy(false)
{
    set_m_data(underlyingdata->getPtr());
    m_protected=false;
}

Data::Data(DataAbstract_ptr underlyingdata)
    : m_shared(false), m_lazy(false)
{
    set_m_data(underlyingdata);
    m_protected=false;
}

Data::Data(const DataTypes::ValueType& value,
           const DataTypes::ShapeType& shape,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    initialise(value,shape,what,expanded);
    m_protected=false;
}


Data::Data(const object& value,
           const FunctionSpace& what,
           bool expanded)
    : m_shared(false), m_lazy(false)
{
    //numeric::array asNumArray(value);
    WrappedArray w(value);
    initialise(w,what,expanded);
    m_protected=false;
}


Data::Data(const object& value,
           const Data& other)
    : m_shared(false), m_lazy(false)
{
    WrappedArray w(value);

    // extract the shape of the numarray
    const DataTypes::ShapeType& tempShape=w.getShape();
    if (w.getRank()==0) {

        // get the space for the data vector
        int len1 = DataTypes::noValues(tempShape);
        DataVector temp_data(len1, 0.0, len1);
        temp_data.copyFromArray(w,1);

        int len = DataTypes::noValues(other.getDataPointShape());

        DataVector temp2_data(len, temp_data[0], len);
        DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
        // m_data=DataAbstract_ptr(t);
        set_m_data(DataAbstract_ptr(t));

    } else {
        //
        // Create a DataConstant with the same sample shape as other
        DataConstant* t=new DataConstant(w,other.getFunctionSpace());
        // m_data=DataAbstract_ptr(t);
        set_m_data(DataAbstract_ptr(t));
    }
    m_protected=false;
}

Data::~Data()
{
    set_m_data(DataAbstract_ptr());
}


// only call in thread safe contexts.
// This method should be atomic
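// (m_shared and m_lazy are cached here so they can be read without touching
// m_data -- see isLazy(), which may be queried while m_data is changing.)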
void Data::set_m_data(DataAbstract_ptr p)
{
    if (m_data.get()!=0)  // release old ownership
    {
        m_data->removeOwner(this);
    }
    if (p.get()!=0)
    {
        m_data=p;
        m_data->addOwner(this);
        m_shared=m_data->isShared();
        m_lazy=m_data->isLazy();
    }
}

void Data::initialise(const WrappedArray& value,
                      const FunctionSpace& what,
                      bool expanded)
{
    //
    // Construct a Data object of the appropriate type.
    // Construct the object first as there seems to be a bug which causes
    // undefined behaviour if an exception is thrown during construction
    // within the shared_ptr constructor.
    if (expanded) {
        DataAbstract* temp=new DataExpanded(value, what);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else {
        DataAbstract* temp=new DataConstant(value, what);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    }
}


void
Data::initialise(const DataTypes::ValueType& value,
                 const DataTypes::ShapeType& shape,
                 const FunctionSpace& what,
                 bool expanded)
{
    //
    // Construct a Data object of the appropriate type.
    // Construct the object first as there seems to be a bug which causes
    // undefined behaviour if an exception is thrown during construction
    // within the shared_ptr constructor.
    if (expanded) {
        DataAbstract* temp=new DataExpanded(what, shape, value);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else {
        DataAbstract* temp=new DataConstant(what, shape, value);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    }
}


escriptDataC
Data::getDataC()
{
    escriptDataC temp;
    temp.m_dataPtr=(void*)this;
    return temp;
}

escriptDataC
Data::getDataC() const
{
    escriptDataC temp;
    temp.m_dataPtr=(void*)this;
    return temp;
}

size_t
Data::getSampleBufferSize() const
{
    return m_data->getSampleBufferSize();
}


const boost::python::tuple
Data::getShapeTuple() const
{
    const DataTypes::ShapeType& shape=getDataPointShape();
    switch(getDataPointRank()) {
    case 0:
        return make_tuple();
    case 1:
        return make_tuple(long_(shape[0]));
    case 2:
        return make_tuple(long_(shape[0]),long_(shape[1]));
    case 3:
        return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
    case 4:
        return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
    default:
        throw DataException("Error - illegal Data rank.");
    }
}

// The different name is needed because boost has trouble with overloaded functions.
// It can't work out what type the function is based solely on its name.
// There are ways to fix this involving creating function pointer variables for each form
// but there doesn't seem to be a need given that the methods have the same name from the python point of view
Data
Data::copySelf()
{
    DataAbstract* temp=m_data->deepCopy();
    return Data(temp);
}

void
Data::copy(const Data& other)
{
    DataAbstract* temp=other.m_data->deepCopy();
    DataAbstract_ptr p=temp->getPtr();
    // m_data=p;
    set_m_data(p);
}


Data
Data::delay()
{
    DataLazy* dl=new DataLazy(m_data);
    return Data(dl);
}

void
Data::delaySelf()
{
    if (!isLazy())
    {
        // m_data=(new DataLazy(m_data))->getPtr();
        set_m_data((new DataLazy(m_data))->getPtr());
    }
}


// For lazy data, it would seem that DataTagged will need to be treated differently since even after setting all tags
// to zero, all the tags from all the DataTags would be in the result.
// However since they all have the same value (0) whether they are there or not should not matter.
// So I have decided that for all types this method will create a constant 0.
// It can be promoted up as required.
// A possible efficiency concern might be expanded->constant->expanded which has an extra memory management
// but we can deal with that if it arises.
//
void
Data::setToZero()
{
    if (isEmpty())
    {
        throw DataException("Error - Operations not permitted on instances of DataEmpty.");
    }
    if (isLazy())
    {
        DataTypes::ValueType v(getNoValues(),0);
        DataConstant* dc=new DataConstant(getFunctionSpace(),getDataPointShape(),v);
        DataLazy* dl=new DataLazy(dc->getPtr());
        set_m_data(dl->getPtr());
    }
    else
    {
        exclusiveWrite();
        m_data->setToZero();
    }
}


void
Data::copyWithMask(const Data& other,
                   const Data& mask)
{
    // 1. Interpolate if required so all Datas use the same FS as this
    // 2. Tag or Expand so that all Datas are the same type
    // 3. Iterate over the data vectors copying values where mask is >0
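    // Intended semantics, roughly: target.copyWithMask(source,mask) overwrites
    // a target value with the corresponding source value wherever mask>0 and
    // leaves it untouched elsewhere (see the element loops below).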
    if (other.isEmpty() || mask.isEmpty())
    {
        throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
    }
    Data other2(other);
    Data mask2(mask);
    other2.resolve();
    mask2.resolve();
    this->resolve();
    FunctionSpace myFS=getFunctionSpace();
    FunctionSpace oFS=other2.getFunctionSpace();
    FunctionSpace mFS=mask2.getFunctionSpace();
    if (oFS!=myFS)
    {
        if (other2.probeInterpolation(myFS))
        {
            other2=other2.interpolate(myFS);
        }
        else
        {
            throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
        }
    }
    if (mFS!=myFS)
    {
        if (mask2.probeInterpolation(myFS))
        {
            mask2=mask2.interpolate(myFS);
        }
        else
        {
            throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
        }
    }
    // Ensure that all args have the same type
    if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
    {
        this->expand();
        other2.expand();
        mask2.expand();
    }
    else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
    {
        this->tag();
        other2.tag();
        mask2.tag();
    }
    else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
    {
    }
    else
    {
        throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
    }
    unsigned int selfrank=getDataPointRank();
    unsigned int otherrank=other2.getDataPointRank();
    unsigned int maskrank=mask2.getDataPointRank();
    if ((selfrank==0) && (otherrank>0 || maskrank>0))
    {
        // to get here we must be copying from a large object into a scalar
        // I am not allowing this.
        // If you are calling copyWithMask then you are considering keeping some existing values
        // and so I'm going to assume that you don't want your data objects getting a new shape.
        throw DataException("Attempt to copyWithMask into a scalar from an object or mask with rank>0.");
    }
    exclusiveWrite();
    // Now we iterate over the elements
    DataVector& self=getReady()->getVectorRW();
    const DataVector& ovec=other2.getReadyPtr()->getVectorRO();
    const DataVector& mvec=mask2.getReadyPtr()->getVectorRO();

    if ((selfrank>0) && (otherrank==0) && (maskrank==0))
    {
        // Not allowing this combination.
        // it is not clear what the rank of the target should be.
        // Should it be filled with the scalar (rank stays the same);
        // or should the target object be reshaped to be a scalar as well.
        throw DataException("Attempt to copyWithMask from scalar mask and data into non-scalar target.");
    }
    if ((selfrank>0) && (otherrank>0) && (maskrank==0))
    {
        if (mvec[0]>0)  // copy whole object if scalar is >0
        {
            copy(other);
        }
        return;
    }
    if (isTagged())  // so all objects involved will also be tagged
    {
        // note the !
        if (!((getDataPointShape()==mask2.getDataPointShape()) &&
              ((other2.getDataPointShape()==mask2.getDataPointShape()) || (otherrank==0))))
        {
            throw DataException("copyWithMask, shape mismatch.");
        }

        // We need to consider the possibility that tags are missing or in the wrong order
        // My guiding assumption here is: All tagged Datas are assumed to have the default value for
        // all tags which are not explicitly defined

        const DataTagged* mptr=dynamic_cast<const DataTagged*>(mask2.m_data.get());
        const DataTagged* optr=dynamic_cast<const DataTagged*>(other2.m_data.get());
        DataTagged* tptr=dynamic_cast<DataTagged*>(m_data.get());

        // first, add any tags missing from other or mask
        const DataTagged::DataMapType& olookup=optr->getTagLookup();
        const DataTagged::DataMapType& mlookup=mptr->getTagLookup();
        const DataTagged::DataMapType& tlookup=tptr->getTagLookup();
        DataTagged::DataMapType::const_iterator i;  // i->first is a tag, i->second is an offset into memory
        for (i=olookup.begin();i!=olookup.end();i++)
        {
            tptr->addTag(i->first);
        }
        for (i=mlookup.begin();i!=mlookup.end();i++) {
            tptr->addTag(i->first);
        }
        // now we know that *this has all the required tags but they aren't guaranteed to be in
        // the same order

        // There are two possibilities: 1. all objects have the same rank. 2. other is a scalar
        if ((selfrank==otherrank) && (otherrank==maskrank))
        {
            for (i=tlookup.begin();i!=tlookup.end();i++)
            {
                // get the target offset
                DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
                DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
                DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
                for (int j=0;j<getDataPointSize();++j)
                {
                    if (mvec[j+moff]>0)
                    {
                        self[j+toff]=ovec[j+ooff];
                    }
                }
            }
            // now for the default value
            for (int j=0;j<getDataPointSize();++j)
            {
                if (mvec[j+mptr->getDefaultOffset()]>0)
                {
                    self[j+tptr->getDefaultOffset()]=ovec[j+optr->getDefaultOffset()];
                }
            }
        }
        else  // other is a scalar
        {
            for (i=tlookup.begin();i!=tlookup.end();i++)
            {
                // get the target offset
                DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
                DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
                DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
                for (int j=0;j<getDataPointSize();++j)
                {
                    if (mvec[j+moff]>0)
                    {
                        self[j+toff]=ovec[ooff];
                    }
                }
            }
            // now for the default value
            for (int j=0;j<getDataPointSize();++j)
            {
                if (mvec[j+mptr->getDefaultOffset()]>0)
                {
                    self[j+tptr->getDefaultOffset()]=ovec[0];
                }
            }
        }

        return;  // ugly
    }
    // mixed scalar and non-scalar operation
    if ((selfrank>0) && (otherrank==0) && (mask2.getDataPointShape()==getDataPointShape()))
    {
        size_t num_points=self.size();
        // OPENMP 3.0 allows unsigned loop vars.
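        // (_OPENMP is 200805 for OpenMP 3.0, so the test below falls back to a
        // signed loop variable only for older OpenMP versions.)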
#if defined(_OPENMP) && (_OPENMP < 200805)
        long i;
#else
        size_t i;
#endif
        size_t psize=getDataPointSize();
#pragma omp parallel for private(i) schedule(static)
        for (i=0;i<num_points;++i)
        {
            if (mvec[i]>0)
            {
                self[i]=ovec[i/psize];  // since this is expanded there is one scalar
            }                           // dest point
        }
        return;  // ugly!
    }
    // tagged data is already taken care of so we only need to worry about shapes
    // special cases with scalars are already dealt with so all we need to worry about is shape
    if ((getDataPointShape()!=other2.getDataPointShape()) || getDataPointShape()!=mask2.getDataPointShape())
    {
        ostringstream oss;
        oss << "Error - size mismatch in arguments to copyWithMask.";
        oss << "\nself_shape=" << DataTypes::shapeToString(getDataPointShape());
        oss << " other2_shape=" << DataTypes::shapeToString(other2.getDataPointShape());
        oss << " mask2_shape=" << DataTypes::shapeToString(mask2.getDataPointShape());
        throw DataException(oss.str());
    }
    size_t num_points=self.size();

    // OPENMP 3.0 allows unsigned loop vars.
#if defined(_OPENMP) && (_OPENMP < 200805)
    long i;
#else
    size_t i;
#endif
#pragma omp parallel for private(i) schedule(static)
    for (i=0;i<num_points;++i)
    {
        if (mvec[i]>0)
        {
            self[i]=ovec[i];
        }
    }
}

bool
Data::isExpanded() const
{
    DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
    return (temp!=0);
}

bool
Data::actsExpanded() const
{
    return m_data->actsExpanded();
}


bool
Data::isTagged() const
{
    DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
    return (temp!=0);
}

bool
Data::isEmpty() const
{
    DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
    return (temp!=0);
}

bool
Data::isConstant() const
{
    DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
    return (temp!=0);
}

bool
Data::isLazy() const
{
    return m_lazy;  // not asking m_data because we need to be able to ask this while m_data is changing
}

// at the moment this is synonymous with !isLazy() but that could change
bool
Data::isReady() const
{
    return (dynamic_cast<DataReady*>(m_data.get())!=0);
}


void
Data::setProtection()
{
    m_protected=true;
}

bool
Data::isProtected() const
{
    return m_protected;
}


void
Data::expand()
{
    if (isConstant()) {
        DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
        DataAbstract* temp=new DataExpanded(*tempDataConst);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else if (isTagged()) {
        DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
        DataAbstract* temp=new DataExpanded(*tempDataTag);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else if (isExpanded()) {
        //
        // do nothing
    } else if (isEmpty()) {
        throw DataException("Error - Expansion of DataEmpty not possible.");
    } else if (isLazy()) {
        resolve();
        expand();  // resolve might not give us expanded data
    } else {
        throw DataException("Error - Expansion not implemented for this Data type.");
    }
}

void
Data::tag()
{
    if (isConstant()) {
        DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
        DataAbstract* temp=new DataTagged(*tempDataConst);
        // m_data=temp->getPtr();
        set_m_data(temp->getPtr());
    } else if (isTagged()) {
        // do nothing
    } else if (isExpanded()) {
        throw DataException("Error - Creating tag data from DataExpanded not possible.");
    } else if (isEmpty()) {
        throw DataException("Error - Creating tag data from DataEmpty not possible.");
    } else if (isLazy()) {
        DataAbstract_ptr res=m_data->resolve();
        if (m_data->isExpanded())
        {
            throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
        }
        // m_data=res;
        set_m_data(res);
        tag();
    } else {
        throw DataException("Error - Tagging not implemented for this Data type.");
    }
}

void
Data::resolve()
{
    if (isLazy())
    {
        // m_data=m_data->resolve();
        set_m_data(m_data->resolve());
    }
}

void
Data::requireWrite()
{
    resolve();
    exclusiveWrite();
}

Data
Data::oneOver() const
{
    MAKELAZYOP(RECIP)
    return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
}

Data
Data::wherePositive() const
{
    MAKELAZYOP(GZ)
    return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
}

Data
Data::whereNegative() const
{
    MAKELAZYOP(LZ)
    return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
}

Data
Data::whereNonNegative() const
{
    MAKELAZYOP(GEZ)
    return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
}

Data
Data::whereNonPositive() const
{
    MAKELAZYOP(LEZ)
    return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
}

Data
Data::whereZero(double tol) const
{
    // Data dataAbs=abs();
    // return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
    MAKELAZYOPOFF(EZ,tol)
    return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
}

Data
Data::whereNonZero(double tol) const
{
    // Data dataAbs=abs();
    // return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
    MAKELAZYOPOFF(NEZ,tol)
    return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
}

Data
Data::interpolate(const FunctionSpace& functionspace) const
{
    return Data(*this,functionspace);
}

bool
Data::probeInterpolation(const FunctionSpace& functionspace) const
{
    return getFunctionSpace().probeInterpolation(functionspace);
}

Data
Data::gradOn(const FunctionSpace& functionspace) const
{
    if (isEmpty())
    {
        throw DataException("Error - operation not permitted on instances of DataEmpty.");
    }
    double blocktimer_start = blocktimer_time();
    if (functionspace.getDomain()!=getDomain())
        throw DataException("Error - gradient cannot be calculated on different domains.");
    DataTypes::ShapeType grad_shape=getDataPointShape();
    grad_shape.push_back(functionspace.getDim());
    Data out(0.0,grad_shape,functionspace,true);
    getDomain()->setToGradient(out,*this);
    blocktimer_increment("grad()", blocktimer_start);
    return out;
}

Data
Data::grad() const
{
    if (isEmpty())
    {
        throw DataException("Error - operation not permitted on instances of DataEmpty.");
    }
    return gradOn(escript::function(*getDomain()));
}

int
Data::getDataPointSize() const
{
    return m_data->getNoValues();
}

DataTypes::ValueType::size_type
Data::getLength() const
{
    return m_data->getLength();
}

const boost::python::object
Data::getValueOfDataPointAsTuple(int dataPointNo)
{
    forceResolve();
    if (getNumDataPointsPerSample()>0) {
        int sampleNo = dataPointNo/getNumDataPointsPerSample();
        int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
        //
        // Check a valid sample number has been supplied
        if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
            throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
        }

        //
        // Check a valid data point number has been supplied
        if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
            throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
        }
        // TODO: global error handling
        DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
        return pointToTuple(getDataPointShape(),&(getDataAtOffsetRO(offset)));
    }
    else
    {
        // The old method would return an empty numArray of the given shape
        // I'm going to throw an exception because if we have zero points per sample we have problems
        throw DataException("Error - need at least 1 datapoint per sample.");
    }
}


void
Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
{
    // this will throw if the value cannot be represented
    setValueOfDataPointToArray(dataPointNo,py_object);
}

void
Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    forceResolve();

    WrappedArray w(obj);
    //
    // check rank
    if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
        throw DataException("Rank of numarray does not match Data object rank");

    //
    // check shape of num_array
    for (unsigned int i=0; i<getDataPointRank(); i++) {
        if (w.getShape()[i]!=getDataPointShape()[i])
            throw DataException("Shape of numarray does not match Data object shape");
    }
    //
    // make sure data is expanded:
    //
    if (!isExpanded()) {
        expand();
    }
    if (getNumDataPointsPerSample()>0) {
        int sampleNo = dataPointNo/getNumDataPointsPerSample();
        int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
        m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
    } else {
        m_data->copyToDataPoint(-1, 0,w);
    }
}

void
Data::setValueOfDataPoint(int dataPointNo, const double value)
{
    if (isProtected()) {
        throw DataException("Error - attempt to update protected Data object.");
    }
    //
    // make sure data is expanded:
    forceResolve();
    if (!isExpanded()) {
        expand();
    }
    if (getNumDataPointsPerSample()>0) {
        int sampleNo = dataPointNo/getNumDataPointsPerSample();
        int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
        m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
    } else {
        m_data->copyToDataPoint(-1, 0,value);
    }
}

const
boost::python::object
Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
{
    // This could be lazier than it is now
    forceResolve();

    // copy datapoint into a buffer
    // broadcast buffer to all nodes
    // convert buffer to tuple
    // return tuple
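    // Note: MPI_Bcast below is a collective operation, so in an MPI build every
    // rank must call this method, even though only rank procNo supplies the value.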

    const DataTypes::ShapeType& dataPointShape = getDataPointShape();
    size_t length=DataTypes::noValues(dataPointShape);

    // added for the MPI communication
    double *tmpData = new double[length];

    // updated for the MPI case
    if( get_MPIRank()==procNo ){
        if (getNumDataPointsPerSample()>0) {
            int sampleNo = dataPointNo/getNumDataPointsPerSample();
            int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
            //
            // Check a valid sample number has been supplied
            if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
                throw DataException("Error - Data::getValueOfGlobalDataPointAsTuple: invalid sampleNo.");
            }

            //
            // Check a valid data point number has been supplied
            if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
                throw DataException("Error - Data::getValueOfGlobalDataPointAsTuple: invalid dataPointNoInSample.");
            }
            // TODO: global error handling
            DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);

            memcpy(tmpData,&(getDataAtOffsetRO(offset)),length*sizeof(double));
        }
    }
#ifdef PASO_MPI
    // broadcast the data to all other processes
    MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
#endif

    boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
    delete [] tmpData;
    //
    // return the loaded array
    return t;
}


boost::python::object
Data::integrateToTuple_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot integrate for constant lazy data.");
    }
    return integrateWorker();
}

boost::python::object
Data::integrateToTuple()
{
    if (isLazy())
    {
        expand();
    }
    return integrateWorker();
}

boost::python::object
Data::integrateWorker() const
{
    DataTypes::ShapeType shape = getDataPointShape();
    int dataPointSize = getDataPointSize();

    //
    // calculate the integral values
    vector<double> integrals(dataPointSize);
    vector<double> integrals_local(dataPointSize);
    const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
    if (dom==0)
    {
        throw DataException("Can not integrate over non-continuous domains.");
    }
#ifdef PASO_MPI
    dom->setToIntegrals(integrals_local,*this);
    // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
    double *tmp = new double[dataPointSize];
    double *tmp_local = new double[dataPointSize];
    for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
    MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
    for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
    tuple result=pointToTuple(shape,tmp);
    delete[] tmp;
    delete[] tmp_local;
#else
    dom->setToIntegrals(integrals,*this);
    /* double *tmp = new double[dataPointSize];
    for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; }*/
    tuple result=pointToTuple(shape,integrals);
    // delete tmp;
#endif

    return result;
}

Data
Data::sin() const
{
    MAKELAZYOP(SIN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

Data
Data::cos() const
{
    MAKELAZYOP(COS)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

Data
Data::tan() const
{
    MAKELAZYOP(TAN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

Data
Data::asin() const
{
    MAKELAZYOP(ASIN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

Data
Data::acos() const
{
    MAKELAZYOP(ACOS)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}


Data
Data::atan() const
{
    MAKELAZYOP(ATAN)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

Data
Data::sinh() const
{
    MAKELAZYOP(SINH)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

Data
Data::cosh() const
{
    MAKELAZYOP(COSH)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

Data
Data::tanh() const
{
    MAKELAZYOP(TANH)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}


Data
Data::erf() const
{
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
    MAKELAZYOP(ERF)
    return C_TensorUnaryOperation(*this, ::erf);
#endif
}

Data
Data::asinh() const
{
    MAKELAZYOP(ASINH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
    return C_TensorUnaryOperation(*this, ::asinh);
#endif
}

Data
Data::acosh() const
{
    MAKELAZYOP(ACOSH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
    return C_TensorUnaryOperation(*this, ::acosh);
#endif
}

Data
Data::atanh() const
{
    MAKELAZYOP(ATANH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
    return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
    return C_TensorUnaryOperation(*this, ::atanh);
#endif
}

Data
Data::log10() const
{
    MAKELAZYOP(LOG10)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}

Data
Data::log() const
{
    MAKELAZYOP(LOG)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}

Data
Data::sign() const
{
    MAKELAZYOP(SIGN)
    return C_TensorUnaryOperation(*this, escript::fsign);
}

Data
Data::abs() const
{
    MAKELAZYOP(ABS)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}

Data
Data::neg() const
{
    MAKELAZYOP(NEG)
    return C_TensorUnaryOperation(*this, negate<double>());
}

Data
Data::pos() const
{
    // not doing lazy check here is deliberate.
    // since a deep copy of lazy data should be cheap, I'll just let it happen now
    Data result;
    // perform a deep copy
    result.copy(*this);
    return result;
}

Data
Data::exp() const
{
    MAKELAZYOP(EXP)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}

Data
Data::sqrt() const
{
    MAKELAZYOP(SQRT)
    return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}

double
Data::Lsup_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot compute Lsup for constant lazy data.");
    }
    return LsupWorker();
}

double
Data::Lsup()
{
    if (isLazy())
    {
        resolve();
    }
    return LsupWorker();
}

double
Data::sup_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot compute sup for constant lazy data.");
    }
    return supWorker();
}

double
Data::sup()
{
    if (isLazy())
    {
        resolve();
    }
    return supWorker();
}

double
Data::inf_const() const
{
    if (isLazy())
    {
        throw DataException("Error - cannot compute inf for constant lazy data.");
    }
    return infWorker();
}

double
Data::inf()
{
    if (isLazy())
    {
        resolve();
    }
    return infWorker();
}

double
Data::LsupWorker() const
{
    double localValue;
    //
    // set the initial absolute maximum value to zero

    AbsMax abs_max_func;
    localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
    double globalValue;
    MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
    return globalValue;
#else
    return localValue;
#endif
}

double
Data::supWorker() const
{
    double localValue;
    //
    // set the initial maximum value to min possible double
    FMax fmax_func;
    localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
    double globalValue;
    MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
    return globalValue;
#else
    return localValue;
#endif
}

double
Data::infWorker() const
{
    double localValue;
    //
    // set the initial minimum value to max possible double
    FMin fmin_func;
    localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
    double globalValue;
    MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
    return globalValue;
#else
    return localValue;
#endif
}

/* TODO */
/* global reduction */
Data
Data::maxval() const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.maxval();
    }
    //
    // set the initial maximum value to min possible double
    FMax fmax_func;
    return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
}

Data
Data::minval() const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.minval();
    }
    //
    // set the initial minimum value to max possible double
    FMin fmin_func;
    return dp_algorithm(fmin_func,numeric_limits<double>::max());
}

Data
Data::swapaxes(const int axis0, const int axis1) const
{
    if (isLazy())
    {
        Data temp(*this);
        temp.resolve();
        return temp.swapaxes(axis0,axis1);
    }
    int axis0_tmp,axis1_tmp;
    DataTypes::ShapeType s=getDataPointShape();
    DataTypes::ShapeType ev_shape;
    // Build the output shape: the same as s but with the entries for
    // axis0 and axis1 swapped (see the example below).
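    // For example: shape (2,3,4) with swapaxes(0,2) gives ev_shape (4,3,2).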
    int rank=getDataPointRank();
    if (rank<2) {
        throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
    }
    if (axis0<0 || axis0>rank-1) {
        throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
    }
    if (axis1<0 || axis1>rank-1) {
        throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
    }
    if (axis0 == axis1) {
        throw DataException("Error - Data::swapaxes: axis indices must be different.");
    }
    if (axis0 > axis1) {
        axis0_tmp=axis1;
        axis1_tmp=axis0;
    } else {
        axis0_tmp=axis0;
        axis1_tmp=axis1;
    }
    for (int i=0; i<rank; i++) {
        if (i == axis0_tmp) {
            ev_shape.push_back(s[axis1_tmp]);
        } else if (i == axis1_tmp) {
            ev_shape.push_back(s[axis0_tmp]);
        } else {
            ev_shape.push_back(s[i]);
        }
    }
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
    return ev;
}

Data
Data::symmetric() const
{
    // check input
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()==2) {
        if(s[0] != s[1])
            throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
    }
    else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
            throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
    }
    else {
        throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
    }
    MAKELAZYOP(SYM)
    Data ev(0.,getDataPointShape(),getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->symmetric(ev.m_data.get());
    return ev;
}

Data
Data::nonsymmetric() const
{
    MAKELAZYOP(NSYM)
    // check input
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()==2) {
        if(s[0] != s[1])
            throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
    }
    else if (getDataPointRank()==4) {
        if(!(s[0] == s[2] && s[1] == s[3]))
            throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
        DataTypes::ShapeType ev_shape;
        ev_shape.push_back(s[0]);
        ev_shape.push_back(s[1]);
        ev_shape.push_back(s[2]);
        ev_shape.push_back(s[3]);
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->nonsymmetric(ev.m_data.get());
        return ev;
    }
    else {
        throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
    }
}

Data
Data::trace(int axis_offset) const
{
    MAKELAZYOPOFF(TRACE,axis_offset)
    if ((axis_offset<0) || (axis_offset>getDataPointRank()))
    {
        throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
    }
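    // The result drops the two traced indices. For example: rank 2 gives a
    // scalar; rank 3 with axis_offset==0 traces indices 0 and 1, leaving shape
    // (s[2]); rank 4 with axis_offset==1 traces indices 1 and 2, leaving shape
    // (s[0],s[3]).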
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()==2) {
        DataTypes::ShapeType ev_shape;
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
    }
    if (getDataPointRank()==3) {
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
            int s2=s[2];
            ev_shape.push_back(s2);
        }
        else if (axis_offset==1) {
            int s0=s[0];
            ev_shape.push_back(s0);
        }
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
    }
    if (getDataPointRank()==4) {
        DataTypes::ShapeType ev_shape;
        if (axis_offset==0) {
            ev_shape.push_back(s[2]);
            ev_shape.push_back(s[3]);
        }
        else if (axis_offset==1) {
            ev_shape.push_back(s[0]);
            ev_shape.push_back(s[3]);
        }
        else if (axis_offset==2) {
            ev_shape.push_back(s[0]);
            ev_shape.push_back(s[1]);
        }
        Data ev(0.,ev_shape,getFunctionSpace());
        ev.typeMatchRight(*this);
        m_data->trace(ev.m_data.get(), axis_offset);
        return ev;
    }
    else {
        throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
    }
}

Data
Data::transpose(int axis_offset) const
{
    MAKELAZYOPOFF(TRANS,axis_offset)
    DataTypes::ShapeType s=getDataPointShape();
    DataTypes::ShapeType ev_shape;
    // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
    // which goes through all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
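    // For example: rank 3, shape (2,3,4), axis_offset=1 gives ev_shape (3,4,2).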
    int rank=getDataPointRank();
    if (axis_offset<0 || axis_offset>rank) {
        throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
    }
    for (int i=0; i<rank; i++) {
        int index = (axis_offset+i)%rank;
        ev_shape.push_back(s[index]);  // Append to new shape
    }
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->transpose(ev.m_data.get(), axis_offset);
    return ev;
}

Data
Data::eigenvalues() const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.eigenvalues();
    }
    // check input
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
    if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
    // create return
    DataTypes::ShapeType ev_shape(1,s[0]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->eigenvalues(ev.m_data.get());
    return ev;
}

const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.eigenvalues_and_eigenvectors(tol);
    }
    DataTypes::ShapeType s=getDataPointShape();
    if (getDataPointRank()!=2)
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
    if(s[0] != s[1])
        throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
    // create return
    DataTypes::ShapeType ev_shape(1,s[0]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    DataTypes::ShapeType V_shape(2,s[0]);
    Data V(0.,V_shape,getFunctionSpace());
    V.typeMatchRight(*this);
    m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
    return make_tuple(boost::python::object(ev),boost::python::object(V));
}

const boost::python::tuple
Data::minGlobalDataPoint() const
{
    // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() as boost::make_tuple causes an
    // abort (for unknown reasons) if there are openmp directives with it in the
    // surrounding function

    int DataPointNo;
    int ProcNo;
    calc_minGlobalDataPoint(ProcNo,DataPointNo);
    return make_tuple(ProcNo,DataPointNo);
}

void
Data::calc_minGlobalDataPoint(int& ProcNo,
                              int& DataPointNo) const
{
    if (isLazy())
    {
        Data temp(*this);  // to get around the fact that you can't resolve a const Data
        temp.resolve();
        return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
    }
    int i,j;
    int lowi=0,lowj=0;
    double min=numeric_limits<double>::max();

    Data temp=minval();

    int numSamples=temp.getNumSamples();
    int numDPPSample=temp.getNumDataPointsPerSample();

    double next,local_min;
    int local_lowi=0,local_lowj=0;

#pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
    {
        local_min=min;
#pragma omp for private(i,j) schedule(static)
        for (i=0; i<numSamples; i++) {
            for (j=0; j<numDPPSample; j++) {
                next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
                if (next<local_min) {
                    local_min=next;
                    local_lowi=i;
                    local_lowj=j;
                }
            }
        }
#pragma omp critical
        if (local_min<min) {
            min=local_min;
            lowi=local_lowi;
            lowj=local_lowj;
        }
    }

#ifdef PASO_MPI
    // determine the processor on which the minimum occurs
    next = temp.getDataPointRO(lowi,lowj);
    int lowProc = 0;
    double *globalMins = new double[get_MPISize()+1];
    int error;
    error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );

    if( get_MPIRank()==0 ){
        next = globalMins[lowProc];
        for( i=1; i<get_MPISize(); i++ )
            if( next>globalMins[i] ){
                lowProc = i;
                next = globalMins[i];
            }
    }
    MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );  // lowProc is an int, so broadcast with MPI_INT
1846
1847 delete [] globalMins;
1848 ProcNo = lowProc;
1849 #else
1850 ProcNo = 0;
1851 #endif
1852 DataPointNo = lowj + lowi * numDPPSample;
1853 }
1854
1855 void
1856 Data::saveDX(std::string fileName) const
1857 {
1858 if (isEmpty())
1859 {
1860 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1861 }
1862 if (isLazy())
1863 {
1864 Data temp(*this); // to get around the fact that you can't resolve a const Data
1865 temp.resolve();
1866 temp.saveDX(fileName);
1867 return;
1868 }
1869 boost::python::dict args;
1870 args["data"]=boost::python::object(this);
1871 getDomain()->saveDX(fileName,args);
1872 return;
1873 }
1874
1875 void
1876 Data::saveVTK(std::string fileName) const
1877 {
1878 if (isEmpty())
1879 {
1880 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
1881 }
1882 if (isLazy())
1883 {
1884 Data temp(*this); // to get around the fact that you can't resolve a const Data
1885 temp.resolve();
1886 temp.saveVTK(fileName);
1887 return;
1888 }
1889 boost::python::dict args;
1890 args["data"]=boost::python::object(this);
1891 getDomain()->saveVTK(fileName,args,"","");
1892 return;
1893 }
1894
1895 Data&
1896 Data::operator+=(const Data& right)
1897 {
1898 if (isProtected()) {
1899 throw DataException("Error - attempt to update protected Data object.");
1900 }
1901 MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
1902 exclusiveWrite(); // Since Lazy data does not modify its leaves we only need to worry here
1903 binaryOp(right,plus<double>());
1904 return (*this);
1905 }
1906
1907 Data&
1908 Data::operator+=(const boost::python::object& right)
1909 {
1910 if (isProtected()) {
1911 throw DataException("Error - attempt to update protected Data object.");
1912 }
1913 Data tmp(right,getFunctionSpace(),false);
1914 (*this)+=tmp;
1915 return *this;
1916 }
1917
1918 // Hmmm, operator= makes a deep copy but the copy constructor does not?
1919 Data&
1920 Data::operator=(const Data& other)
1921 {
1922 #if defined ASSIGNMENT_MEANS_DEEPCOPY
1923 // This should not be used.
1924 // Just leaving this here until I have completed transition
1925 copy(other);
1926 #else
1927 m_protected=false; // since any changes should be caught by exclusiveWrite();
1928 // m_data=other.m_data;
1929 set_m_data(other.m_data);
1930 #endif
1931 return (*this);
1932 }
1933
1934 Data&
1935 Data::operator-=(const Data& right)
1936 {
1937 if (isProtected()) {
1938 throw DataException("Error - attempt to update protected Data object.");
1939 }
1940 MAKELAZYBINSELF(right,SUB)
1941 exclusiveWrite();
1942 binaryOp(right,minus<double>());
1943 return (*this);
1944 }
1945
1946 Data&
1947 Data::operator-=(const boost::python::object& right)
1948 {
1949 if (isProtected()) {
1950 throw DataException("Error - attempt to update protected Data object.");
1951 }
1952 Data tmp(right,getFunctionSpace(),false);
1953 (*this)-=tmp;
1954 return (*this);
1955 }
1956
1957 Data&
1958 Data::operator*=(const Data& right)
1959 {
1960 if (isProtected()) {
1961 throw DataException("Error - attempt to update protected Data object.");
1962 }
1963 MAKELAZYBINSELF(right,MUL)
1964 exclusiveWrite();
1965 binaryOp(right,multiplies<double>());
1966 return (*this);
1967 }
1968
1969 Data&
1970 Data::operator*=(const boost::python::object& right)
1971 {
1972 if (isProtected()) {
1973 throw DataException("Error - attempt to update protected Data object.");
1974 }
1975 Data tmp(right,getFunctionSpace(),false);
1976 (*this)*=tmp;
1977 return (*this);
1978 }
1979
1980 Data&
1981 Data::operator/=(const Data& right)
1982 {
1983 if (isProtected()) {
1984 throw DataException("Error - attempt to update protected Data object.");
1985 }
1986 MAKELAZYBINSELF(right,DIV)
1987 exclusiveWrite();
1988 binaryOp(right,divides<double>());
1989 return (*this);
1990 }
1991
1992 Data&
1993 Data::operator/=(const boost::python::object& right)
1994 {
1995 if (isProtected()) {
1996 throw DataException("Error - attempt to update protected Data object.");
1997 }
1998 Data tmp(right,getFunctionSpace(),false);
1999 (*this)/=tmp;
2000 return (*this);
2001 }
2002
2003 Data
2004 Data::rpowO(const boost::python::object& left) const
2005 {
2006 Data left_d(left,*this);
2007 return left_d.powD(*this);
2008 }
2009
2010 Data
2011 Data::powO(const boost::python::object& right) const
2012 {
2013 Data tmp(right,getFunctionSpace(),false);
2014 return powD(tmp);
2015 }
2016
2017 Data
2018 Data::powD(const Data& right) const
2019 {
2020 MAKELAZYBIN(right,POW)
2021 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2022 }
2023
2024 //
2025 // NOTE: It is essential to specify the namespace this operator belongs to
2026 Data
2027 escript::operator+(const Data& left, const Data& right)
2028 {
2029 MAKELAZYBIN2(left,right,ADD)
2030 return C_TensorBinaryOperation(left, right, plus<double>());
2031 }
2032
2033 //
2034 // NOTE: It is essential to specify the namespace this operator belongs to
2035 Data
2036 escript::operator-(const Data& left, const Data& right)
2037 {
2038 MAKELAZYBIN2(left,right,SUB)
2039 return C_TensorBinaryOperation(left, right, minus<double>());
2040 }
2041
2042 //
2043 // NOTE: It is essential to specify the namespace this operator belongs to
2044 Data
2045 escript::operator*(const Data& left, const Data& right)
2046 {
2047 MAKELAZYBIN2(left,right,MUL)
2048 return C_TensorBinaryOperation(left, right, multiplies<double>());
2049 }
2050
2051 //
2052 // NOTE: It is essential to specify the namespace this operator belongs to
2053 Data
2054 escript::operator/(const Data& left, const Data& right)
2055 {
2056 MAKELAZYBIN2(left,right,DIV)
2057 return C_TensorBinaryOperation(left, right, divides<double>());
2058 }
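// Illustrative sketch (not part of the original file): with AUTOLAZY enabled
// and an expanded operand, these operators return a deferred DataLazy
// expression instead of evaluating immediately. 'fs' is a hypothetical FunctionSpace.
#if 0
Data x(1.0, DataTypes::ShapeType(), fs, true);   // expanded
Data y(2.0, DataTypes::ShapeType(), fs, true);
Data z = (x + y) * (x - y);   // may build a lazy expression tree
z.resolve();                  // force evaluation when concrete values are needed
#endif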
2059
2060 //
2061 // NOTE: It is essential to specify the namespace this operator belongs to
2062 Data
2063 escript::operator+(const Data& left, const boost::python::object& right)
2064 {
2065 Data tmp(right,left.getFunctionSpace(),false);
2066 MAKELAZYBIN2(left,tmp,ADD)
2067 return left+tmp;
2068 }
2069
2070 //
2071 // NOTE: It is essential to specify the namespace this operator belongs to
2072 Data
2073 escript::operator-(const Data& left, const boost::python::object& right)
2074 {
2075 Data tmp(right,left.getFunctionSpace(),false);
2076 MAKELAZYBIN2(left,tmp,SUB)
2077 return left-tmp;
2078 }
2079
2080 //
2081 // NOTE: It is essential to specify the namespace this operator belongs to
2082 Data
2083 escript::operator*(const Data& left, const boost::python::object& right)
2084 {
2085 Data tmp(right,left.getFunctionSpace(),false);
2086 MAKELAZYBIN2(left,tmp,MUL)
2087 return left*tmp;
2088 }
2089
2090 //
2091 // NOTE: It is essential to specify the namespace this operator belongs to
2092 Data
2093 escript::operator/(const Data& left, const boost::python::object& right)
2094 {
2095 Data tmp(right,left.getFunctionSpace(),false);
2096 MAKELAZYBIN2(left,tmp,DIV)
2097 return left/tmp;
2098 }
2099
2100 //
2101 // NOTE: It is essential to specify the namespace this operator belongs to
2102 Data
2103 escript::operator+(const boost::python::object& left, const Data& right)
2104 {
2105 Data tmp(left,right.getFunctionSpace(),false);
2106 MAKELAZYBIN2(tmp,right,ADD)
2107 return tmp+right;
2108 }
2109
2110 //
2111 // NOTE: It is essential to specify the namespace this operator belongs to
2112 Data
2113 escript::operator-(const boost::python::object& left, const Data& right)
2114 {
2115 Data tmp(left,right.getFunctionSpace(),false);
2116 MAKELAZYBIN2(tmp,right,SUB)
2117 return tmp-right;
2118 }
2119
2120 //
2121 // NOTE: It is essential to specify the namespace this operator belongs to
2122 Data
2123 escript::operator*(const boost::python::object& left, const Data& right)
2124 {
2125 Data tmp(left,right.getFunctionSpace(),false);
2126 MAKELAZYBIN2(tmp,right,MUL)
2127 return tmp*right;
2128 }
2129
2130 //
2131 // NOTE: It is essential to specify the namespace this operator belongs to
2132 Data
2133 escript::operator/(const boost::python::object& left, const Data& right)
2134 {
2135 Data tmp(left,right.getFunctionSpace(),false);
2136 MAKELAZYBIN2(tmp,right,DIV)
2137 return tmp/right;
2138 }
2139
2140
2141 /* TODO: global reduction */
2142 
2143 Data
2144 Data::getItem(const boost::python::object& key) const
2145 {
2146
2147 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2148
2149 if (slice_region.size()!=getDataPointRank()) {
2150 throw DataException("Error - slice size does not match Data rank.");
2151 }
2152
2153 return getSlice(slice_region);
2154 }
2155
2156 /* TODO: global reduction */
2157 
2158 Data
2159 Data::getSlice(const DataTypes::RegionType& region) const
2160 {
2161 return Data(*this,region);
2162 }
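// Illustrative sketch (not part of the original file): slicing with a
// hand-built region; getItem() builds the same region from a python slice key.
// This assumes RegionType is a vector of (begin,end) index pairs, as produced
// by DataTypes::getSliceRegion; 'full' is a hypothetical rank-1 Data.
#if 0
DataTypes::RegionType region;
region.push_back(std::make_pair(0, 2));   // components 0..1 of a rank-1 data point
Data part = full.getSlice(region);
#endif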
2163
2164 /* TODO: global reduction */
2165 
2166 void
2167 Data::setItemO(const boost::python::object& key,
2168 const boost::python::object& value)
2169 {
2170 Data tempData(value,getFunctionSpace());
2171 setItemD(key,tempData);
2172 }
2173
2174 void
2175 Data::setItemD(const boost::python::object& key,
2176 const Data& value)
2177 {
2178 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2179 if (slice_region.size()!=getDataPointRank()) {
2180 throw DataException("Error - slice size does not match Data rank.");
2181 }
2182 exclusiveWrite();
2183 if (getFunctionSpace()!=value.getFunctionSpace()) {
2184 setSlice(Data(value,getFunctionSpace()),slice_region);
2185 } else {
2186 setSlice(value,slice_region);
2187 }
2188 }
2189
2190 void
2191 Data::setSlice(const Data& value,
2192 const DataTypes::RegionType& region)
2193 {
2194 if (isProtected()) {
2195 throw DataException("Error - attempt to update protected Data object.");
2196 }
2197 forceResolve();
2198 exclusiveWrite(); // In case someone finds a way to call this without going through setItemD
2199 Data tempValue(value);
2200 typeMatchLeft(tempValue);
2201 typeMatchRight(tempValue);
2202 getReady()->setSlice(tempValue.m_data.get(),region);
2203 }
2204
2205 void
2206 Data::typeMatchLeft(Data& right) const
2207 {
2208 if (right.isLazy() && !isLazy())
2209 {
2210 right.resolve();
2211 }
2212 if (isExpanded()){
2213 right.expand();
2214 } else if (isTagged()) {
2215 if (right.isConstant()) {
2216 right.tag();
2217 }
2218 }
2219 }
2220
2221 void
2222 Data::typeMatchRight(const Data& right)
2223 {
2224 if (isLazy() && !right.isLazy())
2225 {
2226 resolve();
2227 }
2228 if (isTagged()) {
2229 if (right.isExpanded()) {
2230 expand();
2231 }
2232 } else if (isConstant()) {
2233 if (right.isExpanded()) {
2234 expand();
2235 } else if (right.isTagged()) {
2236 tag();
2237 }
2238 }
2239 }
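// Illustrative note (not part of the original file): the typeMatch helpers
// promote the lesser-structured operand along Constant -> Tagged -> Expanded
// so that setSlice operates on structurally compatible data. The public
// promotions themselves ('fs' is a hypothetical FunctionSpace):
#if 0
Data v(1.0, DataTypes::ShapeType(), fs, false);   // starts as DataConstant
v.tag();      // promote Constant -> Tagged
v.expand();   // promote Tagged -> Expanded
#endif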
2240
2241 // The normal setTaggedValue adds the tag if it is not already present.
2242 // This form does not; it throws instead,
2243 // because tag names are maintained by the domain and a name cannot be added
2244 // here without knowing the tag number it should map to.
2245 void
2246 Data::setTaggedValueByName(std::string name,
2247 const boost::python::object& value)
2248 {
2249 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2250 forceResolve();
2251 exclusiveWrite();
2252 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2253 setTaggedValue(tagKey,value);
2254 }
2255 else
2256 { // The tag name is not known to the domain
2257 throw DataException("Error - unknown tag in setTaggedValueByName.");
2258 }
2259 }
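// Illustrative sketch (not part of the original file): the name must already
// be in the domain's tag map or the exception above is thrown. 'fs' is a
// hypothetical FunctionSpace and "top" a hypothetical tag name.
#if 0
Data d(0.0, DataTypes::ShapeType(), fs, false);
d.setTaggedValueByName("top", boost::python::object(5.0));  // "top" must map to a tag key
#endif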
2260
2261 void
2262 Data::setTaggedValue(int tagKey,
2263 const boost::python::object& value)
2264 {
2265 if (isProtected()) {
2266 throw DataException("Error - attempt to update protected Data object.");
2267 }
2268 //
2269 // Ensure underlying data object is of type DataTagged
2270 forceResolve();
2271 exclusiveWrite();
2272 if (isConstant()) tag();
2273 WrappedArray w(value);
2274
2275 DataVector temp_data2;
2276 temp_data2.copyFromArray(w,1);
2277
2278 m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
2279 }
2280
2281
2282 void
2283 Data::setTaggedValueFromCPP(int tagKey,
2284 const DataTypes::ShapeType& pointshape,
2285 const DataTypes::ValueType& value,
2286 int dataOffset)
2287 {
2288 if (isProtected()) {
2289 throw DataException("Error - attempt to update protected Data object.");
2290 }
2291 //
2292 // Ensure underlying data object is of type DataTagged
2293 forceResolve();
2294 if (isConstant()) tag();
2295 exclusiveWrite();
2296 //
2297 // Call DataAbstract::setTaggedValue
2298 m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
2299 }
2300
2301 int
2302 Data::getTagNumber(int dpno)
2303 {
2304 if (isEmpty())
2305 {
2306 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2307 }
2308 return getFunctionSpace().getTagFromDataPointNo(dpno);
2309 }
2310
2311
2312 ostream& escript::operator<<(ostream& o, const Data& data)
2313 {
2314 o << data.toString();
2315 return o;
2316 }
2317
2318 Data
2319 escript::C_GeneralTensorProduct(Data& arg_0,
2320 Data& arg_1,
2321 int axis_offset,
2322 int transpose)
2323 {
2324 // General tensor product: res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
2325 // SM is the product of the last axis_offset entries in arg_0.getShape().
2326
2327 // deal with any lazy data
2328 // if (arg_0.isLazy()) {arg_0.resolve();}
2329 // if (arg_1.isLazy()) {arg_1.resolve();}
2330 if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON && (arg_0.isExpanded() || arg_1.isExpanded())))
2331 {
2332 DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
2333 return Data(c);
2334 }
2335
2336 // Interpolate if necessary and find an appropriate function space
2337 Data arg_0_Z, arg_1_Z;
2338 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2339 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2340 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2341 arg_1_Z = Data(arg_1);
2342 }
2343 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2344 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2345 arg_0_Z =Data(arg_0);
2346 }
2347 else {
2348 throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
2349 }
2350 } else {
2351 arg_0_Z = Data(arg_0);
2352 arg_1_Z = Data(arg_1);
2353 }
2354 // Get rank and shape of inputs
2355 int rank0 = arg_0_Z.getDataPointRank();
2356 int rank1 = arg_1_Z.getDataPointRank();
2357 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2358 const DataTypes::ShapeType& shape1 = arg_1_Z.getDataPointShape();
2359
2360 // Prepare for the loops of the product and verify compatibility of shapes
2361 int start0=0, start1=0;
2362 if (transpose == 0) {}
2363 else if (transpose == 1) { start0 = axis_offset; }
2364 else if (transpose == 2) { start1 = rank1-axis_offset; }
2365 else { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }
2366
2367
2368 // Adjust the shapes for transpose
2369 DataTypes::ShapeType tmpShape0(rank0); // pre-sizing the vectors rather
2370 DataTypes::ShapeType tmpShape1(rank1); // than using push_back
2371 for (int i=0; i<rank0; i++) { tmpShape0[i]=shape0[(i+start0)%rank0]; }
2372 for (int i=0; i<rank1; i++) { tmpShape1[i]=shape1[(i+start1)%rank1]; }
2373
2374 #if 0
2375 // For debugging: show shape after transpose
2376 char tmp[100];
2377 std::string shapeStr;
2378 shapeStr = "(";
2379 for (int i=0; i<rank0; i++) { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
2380 shapeStr += ")";
2381 cout << "C_GeneralTensorProduct: Shape of arg0 is " << shapeStr << endl;
2382 shapeStr = "(";
2383 for (int i=0; i<rank1; i++) { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
2384 shapeStr += ")";
2385 cout << "C_GeneralTensorProduct: Shape of arg1 is " << shapeStr << endl;
2386 #endif
2387
2388 // Prepare for the loops of the product
2389 int SL=1, SM=1, SR=1;
2390 for (int i=0; i<rank0-axis_offset; i++) {
2391 SL *= tmpShape0[i];
2392 }
2393 for (int i=rank0-axis_offset; i<rank0; i++) {
2394 if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
2395 throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
2396 }
2397 SM *= tmpShape0[i];
2398 }
2399 for (int i=axis_offset; i<rank1; i++) {
2400 SR *= tmpShape1[i];
2401 }
2402
2403 // Define the shape of the output (rank of shape is the sum of the loop ranges below)
2404 DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
2405 { // block to limit the scope of out_index
2406 int out_index=0;
2407 for (int i=0; i<rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
2408 for (int i=axis_offset; i<rank1; i++, ++out_index) { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
2409 }
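// Illustrative worked example (not part of the original file): for arg_0 of
// shape (2,3), arg_1 of shape (3,4), axis_offset=1 and transpose=0:
//   SL = 2 (leading part of arg_0), SM = 3 (shared axes), SR = 4 (trailing part of arg_1),
// so shape2 = (2,4) and each data point is a 2x4 = (2x3)*(3x4) matrix product.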
2410
2411 if (shape2.size()>ESCRIPT_MAX_DATA_RANK)
2412 {
2413 ostringstream os;
2414 os << "C_GeneralTensorProduct: Error - Attempt to create a rank " << shape2.size() << " object. The maximum rank is " << ESCRIPT_MAX_DATA_RANK << ".";
2415 throw DataException(os.str());
2416 }
2417
2418 // Declare output Data object
2419 Data res;
2420
2421 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2422 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataConstant output
2423 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2424 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2425 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2426 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2427 }
2428 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2429
2430 // Prepare the DataConstant input
2431 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2432 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2433
2434 // Borrow DataTagged input from Data object
2435 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2436 if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }
2437
2438 // Prepare a DataTagged output (tmp_2)
2439 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace()); // DataTagged output
2440 res.tag();
2441 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2442 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2443
2444 // Prepare offset into DataConstant
2445 int offset_0 = tmp_0->getPointOffset(0,0);
2446 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2447
2448 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2449 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2450
2451 // Compute a matrix product for the default value
2452 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2453 // Compute a matrix product for each tag
2454 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2455 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2456 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2457 tmp_2->addTag(i->first);
2458
2459 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2460 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2461
2462 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2463 }
2464
2465 }
2466 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2467
2468 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2469 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2470 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2471 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2472 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2473 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2474 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2475 int sampleNo_1,dataPointNo_1;
2476 int numSamples_1 = arg_1_Z.getNumSamples();
2477 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2478 int offset_0 = tmp_0->getPointOffset(0,0);
2479 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2480 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2481 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2482 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2483 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2484 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2485 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2486 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2487 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2488 }
2489 }
2490
2491 }
2492 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2493
2494 // Borrow DataTagged input from Data object
2495 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2496 if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }
2497
2498 // Prepare the DataConstant input
2499 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2500 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2501
2502 // Prepare a DataTagged output (tmp_2)
2503 res = Data(0.0, shape2, arg_0_Z.getFunctionSpace()); // DataTagged output
2504 res.tag();
2505 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2506 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2507
2508 // Prepare offset into DataConstant
2509 int offset_1 = tmp_1->getPointOffset(0,0);
2510 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2511 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2512 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2513
2514 // Compute a matrix product for the default value
2515 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2516 // Compute a matrix product for each tag
2517 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2518 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2519 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2520
2521 tmp_2->addTag(i->first);
2522 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2523 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2524 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2525 }
2526
2527 }
2528 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2529
2530 // Borrow DataTagged input from Data object
2531 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2532 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2533
2534 // Borrow DataTagged input from Data object
2535 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2536 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2537
2538 // Prepare a DataTagged output (tmp_2)
2539 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
2540 res.tag(); // DataTagged output
2541 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2542 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2543
2544 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2545 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2546 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2547
2548 // Compute a matrix product for the default value
2549 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2550 // Merge the tags
2551 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2552 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2553 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2554 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2555 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2556 }
2557 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2558 tmp_2->addTag(i->first);
2559 }
2560 // Compute a matrix product for each tag
2561 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2562 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2563 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2564 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2565 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2566
2567 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2568 }
2569
2570 }
2571 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2572
2573 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2574 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2575 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2576 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2577 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2578 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2579 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2580 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2581 int sampleNo_0,dataPointNo_0;
2582 int numSamples_0 = arg_0_Z.getNumSamples();
2583 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2584 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2585 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2586 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2587 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2588 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2589 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2590 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2591 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2592 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2593 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2594 }
2595 }
2596
2597 }
2598 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2599
2600 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2601 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2602 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2603 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2604 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2605 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
2606 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2607 int sampleNo_0,dataPointNo_0;
2608 int numSamples_0 = arg_0_Z.getNumSamples();
2609 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2610 int offset_1 = tmp_1->getPointOffset(0,0);
2611 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2612 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2613 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2614 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2615 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2616 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2617 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2618 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2619 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2620 }
2621 }
2622
2623
2624 }
2625 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2626
2627 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2628 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2629 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2630 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2631 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2632 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2633 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
2634 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2635 int sampleNo_0,dataPointNo_0;
2636 int numSamples_0 = arg_0_Z.getNumSamples();
2637 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2638 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2639 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2640 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2641 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2642 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2643 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2644 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2645 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2646 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2647 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2648 }
2649 }
2650
2651 }
2652 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2653
2654 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2655 res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2656 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2657 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2658 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2659 if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2660 if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2661 if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
2662 int sampleNo_0,dataPointNo_0;
2663 int numSamples_0 = arg_0_Z.getNumSamples();
2664 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2665 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2666 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2667 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2668 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2669 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2670 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2671 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2672 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2673 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2674 matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
2675 }
2676 }
2677
2678 }
2679 else {
2680 throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
2681 }
2682
2683 return res;
2684 }
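// Illustrative sketch (not part of the original file): an ordinary
// matrix-matrix product expressed as a general tensor product
// (axis_offset=1, transpose=0). 'fs' is a hypothetical FunctionSpace.
#if 0
DataTypes::ShapeType sA, sB;
sA.push_back(2); sA.push_back(3);   // A has point shape (2,3)
sB.push_back(3); sB.push_back(4);   // B has point shape (3,4)
Data A(1.0, sA, fs, false);
Data B(2.0, sB, fs, false);
Data C = C_GeneralTensorProduct(A, B, 1, 0);   // point shape (2,4); every entry is 3*1.0*2.0 = 6.0
#endif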
2685
2686 DataAbstract*
2687 Data::borrowData() const
2688 {
2689 return m_data.get();
2690 }
2691
2692 // Note: not ideal that this const method returns a non-const pointer
2693 DataAbstract_ptr
2694 Data::borrowDataPtr() const
2695 {
2696 return m_data;
2697 }
2698
2699 // Note: not ideal that this const method returns a non-const pointer
2700 DataReady_ptr
2701 Data::borrowReadyPtr() const
2702 {
2703 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2704 EsysAssert((dr!=0), "Error - casting to DataReady.");
2705 return dr;
2706 }
2707
2708 std::string
2709 Data::toString() const
2710 {
2711 if (!m_data->isEmpty() &&
2712 !m_data->isLazy() &&
2713 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2714 {
2715 stringstream temp;
2716 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2717 return temp.str();
2718 }
2719 return m_data->toString();
2720 }
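// Illustrative sketch (not part of the original file): ready (non-lazy) Data
// above the TOO_MANY_LINES threshold prints a summary rather than every value.
#if 0
cout << bigData << endl;   // 'bigData' hypothetical; prints
                           // "Summary: inf=... sup=... data points=..." when large
#endif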
2721
2722
2723 // This method is not thread-safe
2724 DataTypes::ValueType::reference
2725 Data::getDataAtOffsetRW(DataTypes::ValueType::size_type i)
2726 {
2727 checkExclusiveWrite();
2728 return getReady()->getDataAtOffsetRW(i);
2729 }
2730
2731 // This method is not thread-safe
2732 DataTypes::ValueType::const_reference
2733 Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i)
2734 {
2735 forceResolve();
2736 return getReady()->getDataAtOffsetRO(i);
2737 }
2738
2739
2740 // DataTypes::ValueType::const_reference
2741 // Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i) const
2742 // {
2743 // if (isLazy())
2744 // {
2745 // throw DataException("Programmer error - getDataAtOffsetRO() not permitted on Lazy Data (object is const which prevents resolving).");
2746 // }
2747 // return getReady()->getDataAtOffsetRO(i);
2748 // }
2749
2750
2751 DataTypes::ValueType::const_reference
2752 Data::getDataPointRO(int sampleNo, int dataPointNo)
2753 {
2754 forceResolve();
2755 if (!isReady())
2756 {
2757 throw DataException("Programmer error -getDataPointRO() not permitted on Lazy Data.");
2758 }
2759 else
2760 {
2761 const DataReady* dr=getReady();
2762 return dr->getDataAtOffsetRO(dr->getPointOffset(sampleNo, dataPointNo));
2763 }
2764 }
2765
2766
2767
2768
2769 DataTypes::ValueType::reference
2770 Data::getDataPointRW(int sampleNo, int dataPointNo)
2771 {
2772 checkExclusiveWrite();
2773 DataReady* dr=getReady();
2774 return dr->getDataAtOffsetRW(dr->getPointOffset(sampleNo, dataPointNo));
2775 }
2776
2777 BufferGroup*
2778 Data::allocSampleBuffer() const
2779 {
2780 if (isLazy())
2781 {
2782 #ifdef _OPENMP
2783 int tnum=omp_get_max_threads();
2784 #else
2785 int tnum=1;
2786 #endif
2787 return new BufferGroup(getSampleBufferSize(),tnum);
2788 }
2789 else
2790 {
2791 return NULL;
2792 }
2793 }
2794
2795 void
2796 Data::freeSampleBuffer(BufferGroup* bufferg)
2797 {
2798 if (bufferg!=0)
2799 {
2800 delete bufferg;
2801 }
2802 }
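// Illustrative sketch (not part of the original file): the intended buffer
// lifecycle around lazy evaluation; one buffer per OpenMP thread is allocated
// only when the data is lazy. 'd' is a hypothetical Data object.
#if 0
BufferGroup* bg = d.allocSampleBuffer();   // NULL when d is not lazy
// ... pass bg to sample-access calls that take a buffer ...
d.freeSampleBuffer(bg);                    // safe to call with NULL
#endif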
2803
2804
2805 /* Member functions specific to the MPI implementation */
2806
2807 void
2808 Data::print()
2809 {
2810 int i,j;
2811
2812 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2813 for( i=0; i<getNumSamples(); i++ )
2814 {
2815 printf( "[%6d]", i );
2816 for( j=0; j<getNumDataPointsPerSample(); j++ )
2817 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2818 printf( "\n" );
2819 }
2820 }
2821 void
2822 Data::dump(const std::string fileName) const
2823 {
2824 try
2825 {
2826 if (isLazy())
2827 {
2828 Data temp(*this); // this is to get a non-const object which we can resolve
2829 temp.resolve();
2830 temp.dump(fileName);
2831 }
2832 else
2833 {
2834 return m_data->dump(fileName);
2835 }
2836 }
2837 catch (std::exception& e)
2838 {
2839 cout << e.what() << endl;
2840 }
2841 }
2842
2843 int
2844 Data::get_MPISize() const
2845 {
2846 int size;
2847 #ifdef PASO_MPI
2848 int error;
2849 error = MPI_Comm_size( get_MPIComm(), &size );
2850 #else
2851 size = 1;
2852 #endif
2853 return size;
2854 }
2855
2856 int
2857 Data::get_MPIRank() const
2858 {
2859 int rank;
2860 #ifdef PASO_MPI
2861 int error;
2862 error = MPI_Comm_rank( get_MPIComm(), &rank );
2863 #else
2864 rank = 0;
2865 #endif
2866 return rank;
2867 }
2868
2869 MPI_Comm
2870 Data::get_MPIComm() const
2871 {
2872 #ifdef PASO_MPI
2873 return MPI_COMM_WORLD;
2874 #else
2875 return -1;
2876 #endif
2877 }
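// Illustrative sketch (not part of the original file): the MPI wrappers degrade
// gracefully in non-MPI builds (rank 0, size 1), so callers need no #ifdefs of
// their own. 'd' is a hypothetical Data object.
#if 0
if (d.get_MPIRank() == 0) {
    cout << "running on " << d.get_MPISize() << " rank(s)" << endl;
}
#endif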
2878
2879
