/[escript]/trunk/escript/src/Data.cpp
ViewVC logotype

Contents of /trunk/escript/src/Data.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2458 - (show annotations)
Wed Jun 3 06:18:21 2009 UTC (9 years, 10 months ago) by jfenwick
File size: 80637 byte(s)
Various numarray erasures

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16
17 #include "DataExpanded.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include "DataEmpty.h"
21 #include "DataLazy.h"
22 #include "FunctionSpaceFactory.h"
23 #include "AbstractContinuousDomain.h"
24 #include "UnaryFuncs.h"
25 #include "FunctionSpaceException.h"
26 #include "EscriptParams.h"
27
28 extern "C" {
29 #include "esysUtils/blocktimer.h"
30 }
31
32 #include <fstream>
33 #include <algorithm>
34 #include <vector>
35 #include <functional>
36 #include <sstream> // so we can throw messages about ranks
37
38 #include <boost/python/dict.hpp>
39 #include <boost/python/extract.hpp>
40 #include <boost/python/long.hpp>
41 #include "WrappedArray.h"
42
43 using namespace std;
44 using namespace boost::python;
45 using namespace boost;
46 using namespace escript;
47
// ensure the current object is not a DataLazy
// The idea was that we could add an optional warning whenever a resolve is forced
// #define forceResolve() if (isLazy()) {#resolve();}

// True when automatic lazy evaluation has been enabled via EscriptParams.
#define AUTOLAZYON escriptParams.getAUTOLAZY()
// If this Data is lazy (or autolazy is on and the data is expanded), return a
// new lazy Data recording unary operation X instead of evaluating eagerly.
#define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
DataLazy* c=new DataLazy(borrowDataPtr(),X);\
return Data(c);\
}
// As MAKELAZYOP but for operations taking an extra parameter Y (eg a tolerance).
#define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
{\
DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
return Data(c);\
}

// Binary op with operand R: replace this object's data with the lazy node and
// return *this (in-place flavour).
#define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
/* m_data=c->getPtr();*/ set_m_data(c->getPtr());\
return (*this);\
}

// like the above but returns a new data rather than *this
#define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
{\
DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
return Data(c);\
}

// Binary op on two explicit operands L and R, returning a new lazy Data.
#define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
{\
DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
return Data(c);\
}

// Do not use the following unless you want to make copies on assignment rather than
// share data. CopyOnWrite should make this unnescessary.
// #define ASSIGNMENT_MEANS_DEEPCOPY
87
88 namespace
89 {
90
91 // This should be safer once the DataC RO changes have been brought in
92 template <class ARR>
93 boost::python::tuple
94 pointToTuple( const DataTypes::ShapeType& shape,ARR v)
95 {
96 using namespace boost::python;
97 using boost::python::list;
98 int rank=shape.size();
99 if (rank==0)
100 {
101 return make_tuple(v[0]);
102 }
103 else if (rank==1)
104 {
105 list l;
106 for (size_t i=0;i<shape[0];++i)
107 {
108 l.append(v[i]);
109 }
110 return tuple(l);
111 }
112 else if (rank==2)
113 {
114 list lj;
115 for (size_t j=0;j<shape[0];++j)
116 {
117 list li;
118 for (size_t i=0;i<shape[1];++i)
119 {
120 li.append(v[DataTypes::getRelIndex(shape,j,i)]);
121 }
122 lj.append(tuple(li));
123 }
124 return tuple(lj);
125 }
126 else if (rank==3)
127 {
128 list lk;
129 for (size_t k=0;k<shape[0];++k)
130 {
131 list lj;
132 for (size_t j=0;j<shape[1];++j)
133 {
134 list li;
135 for (size_t i=0;i<shape[2];++i)
136 {
137 li.append(v[DataTypes::getRelIndex(shape,k,j,i)]);
138 }
139 lj.append(tuple(li));
140 }
141 lk.append(tuple(lj));
142 }
143 return tuple(lk);
144 }
145 else if (rank==4)
146 {
147 list ll;
148 for (size_t l=0;l<shape[0];++l)
149 {
150 list lk;
151 for (size_t k=0;k<shape[1];++k)
152 {
153 list lj;
154 for (size_t j=0;j<shape[2];++j)
155 {
156 list li;
157 for (size_t i=0;i<shape[3];++i)
158 {
159 li.append(v[DataTypes::getRelIndex(shape,l,k,j,i)]);
160 }
161 lj.append(tuple(li));
162 }
163 lk.append(tuple(lj));
164 }
165 ll.append(tuple(lk));
166 }
167 return tuple(ll);
168 }
169 else
170 throw DataException("Unknown rank in pointToTuple.");
171 }
172
173 } // anonymous namespace
174
175 Data::Data()
176 : m_shared(false), m_lazy(false)
177 {
178 //
179 // Default data is type DataEmpty
180 DataAbstract* temp=new DataEmpty();
181 // m_data=temp->getPtr();
182 set_m_data(temp->getPtr());
183 m_protected=false;
184 }
185
186 Data::Data(double value,
187 const tuple& shape,
188 const FunctionSpace& what,
189 bool expanded)
190 : m_shared(false), m_lazy(false)
191 {
192 DataTypes::ShapeType dataPointShape;
193 for (int i = 0; i < shape.attr("__len__")(); ++i) {
194 dataPointShape.push_back(extract<const int>(shape[i]));
195 }
196
197 int len = DataTypes::noValues(dataPointShape);
198 DataVector temp_data(len,value,len);
199 initialise(temp_data, dataPointShape, what, expanded);
200 m_protected=false;
201 }
202
203 Data::Data(double value,
204 const DataTypes::ShapeType& dataPointShape,
205 const FunctionSpace& what,
206 bool expanded)
207 : m_shared(false), m_lazy(false)
208 {
209 int len = DataTypes::noValues(dataPointShape);
210
211 DataVector temp_data(len,value,len);
212 // DataArrayView temp_dataView(temp_data, dataPointShape);
213
214 // initialise(temp_dataView, what, expanded);
215 initialise(temp_data, dataPointShape, what, expanded);
216
217 m_protected=false;
218 }
219
220 Data::Data(const Data& inData)
221 : m_shared(false), m_lazy(false)
222 {
223 // m_data=inData.m_data;
224 set_m_data(inData.m_data);
225 m_protected=inData.isProtected();
226 }
227
228
229 Data::Data(const Data& inData,
230 const DataTypes::RegionType& region)
231 : m_shared(false), m_lazy(false)
232 {
233 DataAbstract_ptr dat=inData.m_data;
234 if (inData.isLazy())
235 {
236 dat=inData.m_data->resolve();
237 }
238 else
239 {
240 dat=inData.m_data;
241 }
242 //
243 // Create Data which is a slice of another Data
244 DataAbstract* tmp = dat->getSlice(region);
245 // m_data=DataAbstract_ptr(tmp);
246 set_m_data(DataAbstract_ptr(tmp));
247 m_protected=false;
248
249 }
250
// Construct a Data by interpolating inData onto 'functionspace'.
Data::Data(const Data& inData,
           const FunctionSpace& functionspace)
	: m_shared(false), m_lazy(false)
{
  if (inData.isEmpty())
  {
    throw DataException("Error - will not interpolate for instances of DataEmpty.");
  }
  if (inData.getFunctionSpace()==functionspace) {
    // Same function space: no interpolation needed, just share the payload.
    // m_data=inData.m_data;
    set_m_data(inData.m_data);
  }
  else
  {

    if (inData.isConstant()) {	// for a constant function, we just need to use the new function space
      if (!inData.probeInterpolation(functionspace))
      {  // Even though this is constant, we still need to check whether interpolation is allowed
	throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
      }
      // if the data is not lazy, this will just be a cast to DataReady
      DataReady_ptr dr=inData.m_data->resolve();
      DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
      // m_data=DataAbstract_ptr(dc);
      set_m_data(DataAbstract_ptr(dc));
    } else {
      // General case: build an expanded zero Data on the target space and
      // let the domain fill it in by interpolation.
      Data tmp(0,inData.getDataPointShape(),functionspace,true);
      // Note: Must use a reference or pointer to a derived object
      // in order to get polymorphic behaviour. Shouldn't really
      // be able to create an instance of AbstractDomain but that was done
      // as a boost:python work around which may no longer be required.
      /*const AbstractDomain& inDataDomain=inData.getDomain();*/
      const_Domain_ptr inDataDomain=inData.getDomain();
      if  (inDataDomain==functionspace.getDomain()) {
        inDataDomain->interpolateOnDomain(tmp,inData);
      } else {
        inDataDomain->interpolateACross(tmp,inData);
      }
      // m_data=tmp.m_data;
      set_m_data(tmp.m_data);
    }
  }
  m_protected=false;
}
295
// Wrap an existing DataAbstract; takes shared ownership via its smart pointer.
Data::Data(DataAbstract* underlyingdata)
	: m_shared(false), m_lazy(false)
{
	set_m_data(underlyingdata->getPtr());
	m_protected=false;
}
302
// Wrap an existing shared DataAbstract pointer.
Data::Data(DataAbstract_ptr underlyingdata)
	: m_shared(false), m_lazy(false)
{
	set_m_data(underlyingdata);
	m_protected=false;
}
309
// Build a Data from a raw value vector plus an explicit point shape.
Data::Data(const DataTypes::ValueType& value,
	   const DataTypes::ShapeType& shape,
	   const FunctionSpace& what,
	   bool expanded)
	: m_shared(false), m_lazy(false)
{
  initialise(value,shape,what,expanded);
  m_protected=false;
}
319
320
321 Data::Data(const object& value,
322 const FunctionSpace& what,
323 bool expanded)
324 : m_shared(false), m_lazy(false)
325 {
326 WrappedArray w(value);
327 initialise(w,what,expanded);
328 m_protected=false;
329 }
330
331
332 Data::Data(const object& value,
333 const Data& other)
334 : m_shared(false), m_lazy(false)
335 {
336 WrappedArray w(value);
337
338 // extract the shape of the array
339 const DataTypes::ShapeType& tempShape=w.getShape();
340 if (w.getRank()==0) {
341
342
343 // get the space for the data vector
344 int len1 = DataTypes::noValues(tempShape);
345 DataVector temp_data(len1, 0.0, len1);
346 temp_data.copyFromArray(w,1);
347
348 int len = DataTypes::noValues(other.getDataPointShape());
349
350 DataVector temp2_data(len, temp_data[0], len);
351 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
352 // m_data=DataAbstract_ptr(t);
353 set_m_data(DataAbstract_ptr(t));
354
355 } else {
356 //
357 // Create a DataConstant with the same sample shape as other
358 DataConstant* t=new DataConstant(w,other.getFunctionSpace());
359 // m_data=DataAbstract_ptr(t);
360 set_m_data(DataAbstract_ptr(t));
361 }
362 m_protected=false;
363 }
364
// Destructor: release this object's ownership of the underlying DataAbstract.
Data::~Data()
{
  set_m_data(DataAbstract_ptr());
}
369
370
// only call in thread safe contexts.
// This method should be atomic
// Swap in a new underlying DataAbstract, keeping the owner registration and
// the cached m_shared/m_lazy flags in sync.
void Data::set_m_data(DataAbstract_ptr p)
{
  if (m_data.get()!=0)	// release old ownership
  {
	m_data->removeOwner(this);
  }
  if (p.get()!=0)
  {
	m_data=p;
	m_data->addOwner(this);
	m_shared=m_data->isShared();
	m_lazy=m_data->isLazy();
  }
  // NOTE(review): when p is null, ownership of the old data is released but
  // m_data itself and the cached flags are left unchanged — presumably only
  // the destructor passes null; confirm no other caller does.
}
387
388 void Data::initialise(const WrappedArray& value,
389 const FunctionSpace& what,
390 bool expanded)
391 {
392 //
393 // Construct a Data object of the appropriate type.
394 // Construct the object first as there seems to be a bug which causes
395 // undefined behaviour if an exception is thrown during construction
396 // within the shared_ptr constructor.
397 if (expanded) {
398 DataAbstract* temp=new DataExpanded(value, what);
399 // m_data=temp->getPtr();
400 set_m_data(temp->getPtr());
401 } else {
402 DataAbstract* temp=new DataConstant(value, what);
403 // m_data=temp->getPtr();
404 set_m_data(temp->getPtr());
405 }
406 }
407
408
409 void
410 Data::initialise(const DataTypes::ValueType& value,
411 const DataTypes::ShapeType& shape,
412 const FunctionSpace& what,
413 bool expanded)
414 {
415 //
416 // Construct a Data object of the appropriate type.
417 // Construct the object first as there seems to be a bug which causes
418 // undefined behaviour if an exception is thrown during construction
419 // within the shared_ptr constructor.
420 if (expanded) {
421 DataAbstract* temp=new DataExpanded(what, shape, value);
422 // m_data=temp->getPtr();
423 set_m_data(temp->getPtr());
424 } else {
425 DataAbstract* temp=new DataConstant(what, shape, value);
426 // m_data=temp->getPtr();
427 set_m_data(temp->getPtr());
428 }
429 }
430
431
432 escriptDataC
433 Data::getDataC()
434 {
435 escriptDataC temp;
436 temp.m_dataPtr=(void*)this;
437 return temp;
438 }
439
440 escriptDataC
441 Data::getDataC() const
442 {
443 escriptDataC temp;
444 temp.m_dataPtr=(void*)this;
445 return temp;
446 }
447
// Size (in doubles) of the buffer needed to hold one sample of this data.
size_t
Data::getSampleBufferSize() const
{
  return m_data->getSampleBufferSize();
}
453
454
455 const boost::python::tuple
456 Data::getShapeTuple() const
457 {
458 const DataTypes::ShapeType& shape=getDataPointShape();
459 switch(getDataPointRank()) {
460 case 0:
461 return make_tuple();
462 case 1:
463 return make_tuple(long_(shape[0]));
464 case 2:
465 return make_tuple(long_(shape[0]),long_(shape[1]));
466 case 3:
467 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
468 case 4:
469 return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
470 default:
471 throw DataException("Error - illegal Data rank.");
472 }
473 }
474
475
476 // The different name is needed because boost has trouble with overloaded functions.
477 // It can't work out what type the function is based soley on its name.
478 // There are ways to fix this involving creating function pointer variables for each form
479 // but there doesn't seem to be a need given that the methods have the same name from the python point of view
480 Data
481 Data::copySelf()
482 {
483 DataAbstract* temp=m_data->deepCopy();
484 return Data(temp);
485 }
486
487 void
488 Data::copy(const Data& other)
489 {
490 DataAbstract* temp=other.m_data->deepCopy();
491 DataAbstract_ptr p=temp->getPtr();
492 // m_data=p;
493 set_m_data(p);
494 }
495
496
497 Data
498 Data::delay()
499 {
500 DataLazy* dl=new DataLazy(m_data);
501 return Data(dl);
502 }
503
504 void
505 Data::delaySelf()
506 {
507 if (!isLazy())
508 {
509 // m_data=(new DataLazy(m_data))->getPtr();
510 set_m_data((new DataLazy(m_data))->getPtr());
511 }
512 }
513
514
515 // For lazy data, it would seem that DataTagged will need to be treated differently since even after setting all tags
516 // to zero, all the tags from all the DataTags would be in the result.
517 // However since they all have the same value (0) whether they are there or not should not matter.
518 // So I have decided that for all types this method will create a constant 0.
519 // It can be promoted up as required.
520 // A possible efficiency concern might be expanded->constant->expanded which has an extra memory management
521 // but we can deal with that if it arises.
522 //
523 void
524 Data::setToZero()
525 {
526 if (isEmpty())
527 {
528 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
529 }
530 if (isLazy())
531 {
532 DataTypes::ValueType v(getNoValues(),0);
533 DataConstant* dc=new DataConstant(getFunctionSpace(),getDataPointShape(),v);
534 DataLazy* dl=new DataLazy(dc->getPtr());
535 set_m_data(dl->getPtr());
536 }
537 else
538 {
539 exclusiveWrite();
540 m_data->setToZero();
541 }
542 }
543
544
// Copy values from 'other' into *this at every position where 'mask' is >0.
void
Data::copyWithMask(const Data& other,
                   const Data& mask)
{
// 1. Interpolate if required so all Datas use the same FS as this
// 2. Tag or Expand so that all Data's are the same type
// 3. Iterate over the data vectors copying values where mask is >0
  if (other.isEmpty() || mask.isEmpty())
  {
	throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
  }
  // Work on resolved local copies so that lazy inputs are handled uniformly.
  Data other2(other);
  Data mask2(mask);
  other2.resolve();
  mask2.resolve();
  this->resolve();
  FunctionSpace myFS=getFunctionSpace();
  FunctionSpace oFS=other2.getFunctionSpace();
  FunctionSpace mFS=mask2.getFunctionSpace();
  if (oFS!=myFS)
  {
     if (other2.probeInterpolation(myFS))
     {
	other2=other2.interpolate(myFS);
     }
     else
     {
	throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
     }
  }
  if (mFS!=myFS)
  {
     if (mask2.probeInterpolation(myFS))
     {
	mask2=mask2.interpolate(myFS);
     }
     else
     {
	throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
     }
  }
			// Ensure that all args have the same type
  if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
  {
	this->expand();
	other2.expand();
	mask2.expand();
  }
  else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
  {
	this->tag();
	other2.tag();
	mask2.tag();
  }
  else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
  {
  }
  else
  {
	throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
  }
  unsigned int selfrank=getDataPointRank();
  unsigned int otherrank=other2.getDataPointRank();
  unsigned int maskrank=mask2.getDataPointRank();
  if ((selfrank==0) && (otherrank>0 || maskrank>0))
  {
	// to get here we must be copying from a large object into a scalar
	// I am not allowing this.
	// If you are calling copyWithMask then you are considering keeping some existing values
	// and so I'm going to assume that you don't want your data objects getting a new shape.
	throw DataException("Attempt to copyWithMask into a scalar from an object or mask with rank>0.");
  }
  exclusiveWrite();
  // Now we iterate over the elements
  DataVector& self=getReady()->getVectorRW();;
  const DataVector& ovec=other2.getReadyPtr()->getVectorRO();
  const DataVector& mvec=mask2.getReadyPtr()->getVectorRO();

  if ((selfrank>0) && (otherrank==0) &&(maskrank==0))
  {
	// Not allowing this combination.
	// it is not clear what the rank of the target should be.
	// Should it be filled with the scalar (rank stays the same);
	// or should the target object be reshaped to be a scalar as well.
	throw DataException("Attempt to copyWithMask from scalar mask and data into non-scalar target.");
  }
  if ((selfrank>0) && (otherrank>0) &&(maskrank==0))
  {
	if (mvec[0]>0)		// copy whole object if scalar is >0
	{
	    copy(other);
	}
	return;
  }
  if (isTagged())		// so all objects involved will also be tagged
  {
	// note the !
	if (!((getDataPointShape()==mask2.getDataPointShape()) &&
		((other2.getDataPointShape()==mask2.getDataPointShape()) || (otherrank==0))))
	{
		throw DataException("copyWithMask, shape mismatch.");
	}

	// We need to consider the possibility that tags are missing or in the wrong order
	// My guiding assumption here is: All tagged Datas are assumed to have the default value for
	// all tags which are not explicitly defined

	const DataTagged* mptr=dynamic_cast<const DataTagged*>(mask2.m_data.get());
	const DataTagged* optr=dynamic_cast<const DataTagged*>(other2.m_data.get());
	DataTagged* tptr=dynamic_cast<DataTagged*>(m_data.get());

	// first, add any tags missing from other or mask
	const DataTagged::DataMapType& olookup=optr->getTagLookup();
	const DataTagged::DataMapType& mlookup=mptr->getTagLookup();
	const DataTagged::DataMapType& tlookup=tptr->getTagLookup();
	DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
	for (i=olookup.begin();i!=olookup.end();i++)
	{
           tptr->addTag(i->first);
        }
        for (i=mlookup.begin();i!=mlookup.end();i++) {
           tptr->addTag(i->first);
        }
	// now we know that *this has all the required tags but they aren't guaranteed to be in
	// the same order

	// There are two possibilities: 1. all objects have the same rank. 2. other is a scalar
	if ((selfrank==otherrank) && (otherrank==maskrank))
	{
	   for (i=tlookup.begin();i!=tlookup.end();i++)
	   {
	      // get the target offset
	      DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
	      for (int j=0;j<getDataPointSize();++j)
	      {
		 if (mvec[j+moff]>0)
		 {
		    self[j+toff]=ovec[j+ooff];
		 }
	      }
           }
	   // now for the default value
	   for (int j=0;j<getDataPointSize();++j)
	   {
	      if (mvec[j+mptr->getDefaultOffset()]>0)
	      {
		 self[j+tptr->getDefaultOffset()]=ovec[j+optr->getDefaultOffset()];
	      }
	   }
	}
	else	// other is a scalar
	{
	   for (i=tlookup.begin();i!=tlookup.end();i++)
	   {
	      // get the target offset
	      DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
	      DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
	      for (int j=0;j<getDataPointSize();++j)
	      {
		 if (mvec[j+moff]>0)
		 {
		    self[j+toff]=ovec[ooff];
		 }
	      }
           }
	   // now for the default value
	   for (int j=0;j<getDataPointSize();++j)
	   {
	      if (mvec[j+mptr->getDefaultOffset()]>0)
	      {
		 self[j+tptr->getDefaultOffset()]=ovec[0];
	      }
	   }
	}

	return;			// ugly
  }
  // mixed scalar and non-scalar operation
  if ((selfrank>0) && (otherrank==0) && (mask2.getDataPointShape()==getDataPointShape()))
  {
	size_t num_points=self.size();
	// OPENMP 3.0 allows unsigned loop vars.
	#if defined(_OPENMP) && (_OPENMP < 200805)
	long i;
	#else
	size_t i;
	#endif
	size_t psize=getDataPointSize();
	#pragma omp parallel for private(i) schedule(static)
	for (i=0;i<num_points;++i)
	{
	   if (mvec[i]>0)
	   {
	      self[i]=ovec[i/psize];	// since this is expanded there is one scalar
	   }				// dest point
	}
	return;			// ugly!
  }
  // tagged data is already taken care of so we only need to worry about shapes
  // special cases with scalars are already dealt with so all we need to worry about is shape
  if ((getDataPointShape()!=other2.getDataPointShape()) || getDataPointShape()!=mask2.getDataPointShape())
  {
	ostringstream oss;
	oss <<"Error - size mismatch in arguments to copyWithMask.";
	oss << "\nself_shape=" << DataTypes::shapeToString(getDataPointShape());
	oss << " other2_shape=" << DataTypes::shapeToString(other2.getDataPointShape());
	oss << " mask2_shape=" << DataTypes::shapeToString(mask2.getDataPointShape());
	throw DataException(oss.str());
  }
  size_t num_points=self.size();

  // OPENMP 3.0 allows unsigned loop vars.
  #if defined(_OPENMP) && (_OPENMP < 200805)
  long i;
  #else
  size_t i;
  #endif
  #pragma omp parallel for private(i) schedule(static)
  for (i=0;i<num_points;++i)
  {
	if (mvec[i]>0)
	{
	   self[i]=ovec[i];
	}
  }
}
774
775 bool
776 Data::isExpanded() const
777 {
778 DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
779 return (temp!=0);
780 }
781
// True when the data behaves like expanded data (eg lazy data over expanded).
bool
Data::actsExpanded() const
{
  return m_data->actsExpanded();

}
788
789
790 bool
791 Data::isTagged() const
792 {
793 DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
794 return (temp!=0);
795 }
796
797 bool
798 Data::isEmpty() const
799 {
800 DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
801 return (temp!=0);
802 }
803
804 bool
805 Data::isConstant() const
806 {
807 DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
808 return (temp!=0);
809 }
810
// True when the data is a lazy expression (not yet evaluated).
bool
Data::isLazy() const
{
   return m_lazy;	// not asking m_data because we need to be able to ask this while m_data is changing
}
816
817 // at the moment this is synonymous with !isLazy() but that could change
818 bool
819 Data::isReady() const
820 {
821 return (dynamic_cast<DataReady*>(m_data.get())!=0);
822 }
823
824
// Mark this Data read-only; mutating operations will throw afterwards.
void
Data::setProtection()
{
   m_protected=true;
}
830
// True when this Data has been marked read-only via setProtection().
bool
Data::isProtected() const
{
   return m_protected;
}
836
837
838
839 void
840 Data::expand()
841 {
842 if (isConstant()) {
843 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
844 DataAbstract* temp=new DataExpanded(*tempDataConst);
845 // m_data=temp->getPtr();
846 set_m_data(temp->getPtr());
847 } else if (isTagged()) {
848 DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
849 DataAbstract* temp=new DataExpanded(*tempDataTag);
850 // m_data=temp->getPtr();
851 set_m_data(temp->getPtr());
852 } else if (isExpanded()) {
853 //
854 // do nothing
855 } else if (isEmpty()) {
856 throw DataException("Error - Expansion of DataEmpty not possible.");
857 } else if (isLazy()) {
858 resolve();
859 expand(); // resolve might not give us expanded data
860 } else {
861 throw DataException("Error - Expansion not implemented for this Data type.");
862 }
863 }
864
865 void
866 Data::tag()
867 {
868 if (isConstant()) {
869 DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
870 DataAbstract* temp=new DataTagged(*tempDataConst);
871 // m_data=temp->getPtr();
872 set_m_data(temp->getPtr());
873 } else if (isTagged()) {
874 // do nothing
875 } else if (isExpanded()) {
876 throw DataException("Error - Creating tag data from DataExpanded not possible.");
877 } else if (isEmpty()) {
878 throw DataException("Error - Creating tag data from DataEmpty not possible.");
879 } else if (isLazy()) {
880 DataAbstract_ptr res=m_data->resolve();
881 if (m_data->isExpanded())
882 {
883 throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
884 }
885 // m_data=res;
886 set_m_data(res);
887 tag();
888 } else {
889 throw DataException("Error - Tagging not implemented for this Data type.");
890 }
891 }
892
893 void
894 Data::resolve()
895 {
896 if (isLazy())
897 {
898 // m_data=m_data->resolve();
899 set_m_data(m_data->resolve());
900 }
901 }
902
// Prepare this Data for mutation: make it concrete (resolve lazy data) and
// ensure we are not sharing the payload with any other Data.
void
Data::requireWrite()
{
  resolve();
  exclusiveWrite();
}
909
// Elementwise reciprocal (1/x); lazy when enabled via MAKELAZYOP.
Data
Data::oneOver() const
{
  MAKELAZYOP(RECIP)
  return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
}
916
// Elementwise indicator: 1 where value > 0, else 0.
Data
Data::wherePositive() const
{
  MAKELAZYOP(GZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
}
923
// Elementwise indicator: 1 where value < 0, else 0.
Data
Data::whereNegative() const
{
  MAKELAZYOP(LZ)
  return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
}
930
// Elementwise indicator: 1 where value >= 0, else 0.
Data
Data::whereNonNegative() const
{
  MAKELAZYOP(GEZ)
  return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
}
937
// Elementwise indicator: 1 where value <= 0, else 0.
Data
Data::whereNonPositive() const
{
  MAKELAZYOP(LEZ)
  return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
}
944
// Elementwise indicator: 1 where |value| <= tol, else 0.
Data
Data::whereZero(double tol) const
{
//   Data dataAbs=abs();
//   return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
   MAKELAZYOPOFF(EZ,tol)
   return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));

}
954
// Elementwise indicator: 1 where |value| > tol, else 0.
Data
Data::whereNonZero(double tol) const
{
//  Data dataAbs=abs();
//  return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
  MAKELAZYOPOFF(NEZ,tol)
  return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));

}
964
// Return a new Data holding this data interpolated onto 'functionspace'.
Data
Data::interpolate(const FunctionSpace& functionspace) const
{
  return Data(*this,functionspace);
}
970
// True when interpolation from this Data's FunctionSpace onto the given one
// is possible.
bool
Data::probeInterpolation(const FunctionSpace& functionspace) const
{
  return getFunctionSpace().probeInterpolation(functionspace);
}
976
977 Data
978 Data::gradOn(const FunctionSpace& functionspace) const
979 {
980 if (isEmpty())
981 {
982 throw DataException("Error - operation not permitted on instances of DataEmpty.");
983 }
984 double blocktimer_start = blocktimer_time();
985 if (functionspace.getDomain()!=getDomain())
986 throw DataException("Error - gradient cannot be calculated on different domains.");
987 DataTypes::ShapeType grad_shape=getDataPointShape();
988 grad_shape.push_back(functionspace.getDim());
989 Data out(0.0,grad_shape,functionspace,true);
990 getDomain()->setToGradient(out,*this);
991 blocktimer_increment("grad()", blocktimer_start);
992 return out;
993 }
994
995 Data
996 Data::grad() const
997 {
998 if (isEmpty())
999 {
1000 throw DataException("Error - operation not permitted on instances of DataEmpty.");
1001 }
1002 return gradOn(escript::function(*getDomain()));
1003 }
1004
// Number of double values in a single data point (product of the shape).
int
Data::getDataPointSize() const
{
  return m_data->getNoValues();
}
1010
// Total number of double values stored by the underlying data.
DataTypes::ValueType::size_type
Data::getLength() const
{
  return m_data->getLength();
}
1016
1017 const boost::python::object
1018 Data::getValueOfDataPointAsTuple(int dataPointNo)
1019 {
1020 forceResolve();
1021 if (getNumDataPointsPerSample()>0) {
1022 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1023 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1024 //
1025 // Check a valid sample number has been supplied
1026 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1027 throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
1028 }
1029
1030 //
1031 // Check a valid data point number has been supplied
1032 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1033 throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
1034 }
1035 // TODO: global error handling
1036 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1037 return pointToTuple(getDataPointShape(),&(getDataAtOffsetRO(offset)));
1038 }
1039 else
1040 {
1041 // The old method would return an empty numArray of the given shape
1042 // I'm going to throw an exception because if we have zero points per sample we have problems
1043 throw DataException("Error - need at least 1 datapoint per sample.");
1044 }
1045
1046 }
1047
1048
// Set one data point from an arbitrary python object.
void
Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
{
    // this will throw if the value cannot be represented
    setValueOfDataPointToArray(dataPointNo,py_object);
}
1055
1056 void
1057 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
1058 {
1059 if (isProtected()) {
1060 throw DataException("Error - attempt to update protected Data object.");
1061 }
1062 forceResolve();
1063
1064 WrappedArray w(obj);
1065 //
1066 // check rank
1067 if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
1068 throw DataException("Rank of array does not match Data object rank");
1069
1070 //
1071 // check shape of array
1072 for (unsigned int i=0; i<getDataPointRank(); i++) {
1073 if (w.getShape()[i]!=getDataPointShape()[i])
1074 throw DataException("Shape of array does not match Data object rank");
1075 }
1076 //
1077 // make sure data is expanded:
1078 //
1079 if (!isExpanded()) {
1080 expand();
1081 }
1082 if (getNumDataPointsPerSample()>0) {
1083 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1084 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1085 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
1086 } else {
1087 m_data->copyToDataPoint(-1, 0,w);
1088 }
1089 }
1090
1091 void
1092 Data::setValueOfDataPoint(int dataPointNo, const double value)
1093 {
1094 if (isProtected()) {
1095 throw DataException("Error - attempt to update protected Data object.");
1096 }
1097 //
1098 // make sure data is expanded:
1099 forceResolve();
1100 if (!isExpanded()) {
1101 expand();
1102 }
1103 if (getNumDataPointsPerSample()>0) {
1104 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1105 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1106 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
1107 } else {
1108 m_data->copyToDataPoint(-1, 0,value);
1109 }
1110 }
1111
1112 const
1113 boost::python::object
1114 Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
1115 {
1116 // This could be lazier than it is now
1117 forceResolve();
1118
1119 // copy datapoint into a buffer
1120 // broadcast buffer to all nodes
1121 // convert buffer to tuple
1122 // return tuple
1123
1124 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
1125 size_t length=DataTypes::noValues(dataPointShape);
1126
1127 // added for the MPI communication
1128 double *tmpData = new double[length];
1129
1130 // updated for the MPI case
1131 if( get_MPIRank()==procNo ){
1132 if (getNumDataPointsPerSample()>0) {
1133 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1134 int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1135 //
1136 // Check a valid sample number has been supplied
1137 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1138 throw DataException("Error - Data::convertToNumArray: invalid sampleNo.");
1139 }
1140
1141 //
1142 // Check a valid data point number has been supplied
1143 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1144 throw DataException("Error - Data::convertToNumArray: invalid dataPointNoInSample.");
1145 }
1146 // TODO: global error handling
1147 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1148
1149 memcpy(tmpData,&(getDataAtOffsetRO(offset)),length*sizeof(double));
1150 }
1151 }
1152 #ifdef PASO_MPI
1153 // broadcast the data to all other processes
1154 MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1155 #endif
1156
1157 boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
1158 delete [] tmpData;
1159 //
1160 // return the loaded array
1161 return t;
1162
1163 }
1164
1165
boost::python::object
Data::integrateToTuple_const() const
{
  // Const variant: a const lazy object cannot be expanded in place,
  // so integration of lazy data is refused here.
  if (isLazy())
  {
    throw DataException("Error - cannot integrate for constant lazy data.");
  }
  return integrateWorker();
}

boost::python::object
Data::integrateToTuple()
{
  // Mutable variant: lazy data is expanded first, then integrated.
  if (isLazy())
  {
    expand();
  }
  return integrateWorker();

}
1186
1187 boost::python::object
1188 Data::integrateWorker() const
1189 {
1190 DataTypes::ShapeType shape = getDataPointShape();
1191 int dataPointSize = getDataPointSize();
1192
1193 //
1194 // calculate the integral values
1195 vector<double> integrals(dataPointSize);
1196 vector<double> integrals_local(dataPointSize);
1197 const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
1198 if (dom==0)
1199 {
1200 throw DataException("Can not integrate over non-continuous domains.");
1201 }
1202 #ifdef PASO_MPI
1203 dom->setToIntegrals(integrals_local,*this);
1204 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1205 double *tmp = new double[dataPointSize];
1206 double *tmp_local = new double[dataPointSize];
1207 for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1208 MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1209 for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1210 tuple result=pointToTuple(shape,tmp);
1211 delete[] tmp;
1212 delete[] tmp_local;
1213 #else
1214 dom->setToIntegrals(integrals,*this);
1215 /* double *tmp = new double[dataPointSize];
1216 for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; }*/
1217 tuple result=pointToTuple(shape,integrals);
1218 // delete tmp;
1219 #endif
1220
1221
1222 return result;
1223 }
1224
Data
Data::sin() const
{
  // Pointwise sine; lazy/auto-lazy data yields a deferred SIN node instead.
  MAKELAZYOP(SIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
}

Data
Data::cos() const
{
  // Pointwise cosine.
  MAKELAZYOP(COS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
}

Data
Data::tan() const
{
  // Pointwise tangent.
  MAKELAZYOP(TAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
}

Data
Data::asin() const
{
  // Pointwise arcsine.
  MAKELAZYOP(ASIN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
}

Data
Data::acos() const
{
  // Pointwise arccosine.
  MAKELAZYOP(ACOS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
}


Data
Data::atan() const
{
  // Pointwise arctangent.
  MAKELAZYOP(ATAN)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
}

Data
Data::sinh() const
{
  // Pointwise hyperbolic sine.
  MAKELAZYOP(SINH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
}

Data
Data::cosh() const
{
  // Pointwise hyperbolic cosine.
  MAKELAZYOP(COSH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
}

Data
Data::tanh() const
{
  // Pointwise hyperbolic tangent.
  MAKELAZYOP(TANH)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
}


Data
Data::erf() const
{
  // Pointwise error function. ::erf is unavailable on _WIN32 builds
  // (except with the Intel compiler), hence the guard.
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
#else
  MAKELAZYOP(ERF)
  return C_TensorUnaryOperation(*this, ::erf);
#endif
}

Data
Data::asinh() const
{
  // Pointwise inverse hyperbolic sine; a local substitute replaces the
  // C99 ::asinh on _WIN32 (non-Intel) builds.
  MAKELAZYOP(ASINH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::asinh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::asinh);
#endif
}

Data
Data::acosh() const
{
  // Pointwise inverse hyperbolic cosine (substitute on _WIN32, see asinh).
  MAKELAZYOP(ACOSH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::acosh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::acosh);
#endif
}

Data
Data::atanh() const
{
  // Pointwise inverse hyperbolic tangent (substitute on _WIN32, see asinh).
  MAKELAZYOP(ATANH)
#if defined (_WIN32) && !defined(__INTEL_COMPILER)
  return C_TensorUnaryOperation(*this, escript::atanh_substitute);
#else
  return C_TensorUnaryOperation(*this, ::atanh);
#endif
}
1333
Data
Data::log10() const
{
  // Pointwise base-10 logarithm.
  MAKELAZYOP(LOG10)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
}

Data
Data::log() const
{
  // Pointwise natural logarithm.
  MAKELAZYOP(LOG)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
}

Data
Data::sign() const
{
  // Pointwise sign, computed by the project helper escript::fsign.
  MAKELAZYOP(SIGN)
  return C_TensorUnaryOperation(*this, escript::fsign);
}

Data
Data::abs() const
{
  // Pointwise absolute value.
  MAKELAZYOP(ABS)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
}

Data
Data::neg() const
{
  // Pointwise negation via the std::negate functor.
  MAKELAZYOP(NEG)
  return C_TensorUnaryOperation(*this, negate<double>());
}

Data
Data::pos() const
{
  // Identity operation: returns a deep copy of this object.
  // not doing lazy check here is deliberate.
  // since a deep copy of lazy data should be cheap, I'll just let it happen now
  Data result;
  // perform a deep copy
  result.copy(*this);
  return result;
}

Data
Data::exp() const
{
  // Pointwise exponential.
  MAKELAZYOP(EXP)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
}

Data
Data::sqrt() const
{
  // Pointwise square root.
  MAKELAZYOP(SQRT)
  return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
}
1393
double
Data::Lsup_const() const
{
  // Const variant: lazy data cannot be resolved on a const object, so refuse.
  if (isLazy())
  {
    throw DataException("Error - cannot compute Lsup for constant lazy data.");
  }
  return LsupWorker();
}

double
Data::Lsup()
{
  // Largest absolute value over all data points; lazy data is resolved first.
  if (isLazy())
  {
    resolve();
  }
  return LsupWorker();
}

double
Data::sup_const() const
{
  // Const variant: refuses lazy data (cannot resolve in place).
  if (isLazy())
  {
    throw DataException("Error - cannot compute sup for constant lazy data.");
  }
  return supWorker();
}

double
Data::sup()
{
  // Maximum value over all data points; lazy data is resolved first.
  if (isLazy())
  {
    resolve();
  }
  return supWorker();
}

double
Data::inf_const() const
{
  // Const variant: refuses lazy data (cannot resolve in place).
  if (isLazy())
  {
    throw DataException("Error - cannot compute inf for constant lazy data.");
  }
  return infWorker();
}

double
Data::inf()
{
  // Minimum value over all data points; lazy data is resolved first.
  if (isLazy())
  {
    resolve();
  }
  return infWorker();
}
1453
double
Data::LsupWorker() const
{
  // Local maximum of |value| over this rank's data, then (under MPI) a
  // max-reduction across all ranks.
  double localValue;
  //
  // set the initial absolute maximum value to zero

  AbsMax abs_max_func;
  localValue = algorithm(abs_max_func,0);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

double
Data::supWorker() const
{
  // Local maximum over this rank's data, then (under MPI) a global max.
  double localValue;
  //
  // set the initial maximum value to min possible double
  // (max()*-1 is the most negative finite double)
  FMax fmax_func;
  localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}

double
Data::infWorker() const
{
  // Local minimum over this rank's data, then (under MPI) a global min.
  double localValue;
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  localValue = algorithm(fmin_func,numeric_limits<double>::max());
#ifdef PASO_MPI
  double globalValue;
  MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
  return globalValue;
#else
  return localValue;
#endif
}
1505
/* TODO */
/* global reduction */
Data
Data::maxval() const
{
  // Reduction applied per data point via dp_algorithm (presumably the
  // maximum over each point's components - confirm against dp_algorithm).
  // Lazy data is resolved on a temporary copy first.
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.maxval();
  }
  //
  // set the initial maximum value to min possible double
  FMax fmax_func;
  return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
}

Data
Data::minval() const
{
  // Per-data-point counterpart of maxval() using the FMin reducer.
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.minval();
  }
  //
  // set the initial minimum value to max possible double
  FMin fmin_func;
  return dp_algorithm(fmin_func,numeric_limits<double>::max());
}
1537
1538 Data
1539 Data::swapaxes(const int axis0, const int axis1) const
1540 {
1541 if (isLazy())
1542 {
1543 Data temp(*this);
1544 temp.resolve();
1545 return temp.swapaxes(axis0,axis1);
1546 }
1547 int axis0_tmp,axis1_tmp;
1548 DataTypes::ShapeType s=getDataPointShape();
1549 DataTypes::ShapeType ev_shape;
1550 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1551 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1552 int rank=getDataPointRank();
1553 if (rank<2) {
1554 throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1555 }
1556 if (axis0<0 || axis0>rank-1) {
1557 throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1=" + rank-1);
1558 }
1559 if (axis1<0 || axis1>rank-1) {
1560 throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1=" + rank-1);
1561 }
1562 if (axis0 == axis1) {
1563 throw DataException("Error - Data::swapaxes: axis indices must be different.");
1564 }
1565 if (axis0 > axis1) {
1566 axis0_tmp=axis1;
1567 axis1_tmp=axis0;
1568 } else {
1569 axis0_tmp=axis0;
1570 axis1_tmp=axis1;
1571 }
1572 for (int i=0; i<rank; i++) {
1573 if (i == axis0_tmp) {
1574 ev_shape.push_back(s[axis1_tmp]);
1575 } else if (i == axis1_tmp) {
1576 ev_shape.push_back(s[axis0_tmp]);
1577 } else {
1578 ev_shape.push_back(s[i]);
1579 }
1580 }
1581 Data ev(0.,ev_shape,getFunctionSpace());
1582 ev.typeMatchRight(*this);
1583 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1584 return ev;
1585
1586 }
1587
Data
Data::symmetric() const
{
  // Symmetric part of each data point. Valid for rank 2 with s[0]==s[1]
  // or rank 4 with s[0]==s[2] and s[1]==s[3]. The shape checks run before
  // the lazy shortcut, so invalid shapes are rejected even for lazy data.
  // check input
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()==2) {
    if(s[0] != s[1])
      throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
  }
  else if (getDataPointRank()==4) {
    if(!(s[0] == s[2] && s[1] == s[3]))
      throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
  }
  else {
    throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
  }
  MAKELAZYOP(SYM)
  Data ev(0.,getDataPointShape(),getFunctionSpace());
  ev.typeMatchRight(*this);
  m_data->symmetric(ev.m_data.get());
  return ev;
}
1610
Data
Data::nonsymmetric() const
{
  // Antisymmetric part of each data point (rank 2 or rank 4, same shape
  // constraints as symmetric()).
  // NOTE(review): the lazy shortcut fires BEFORE the shape checks here,
  // whereas symmetric() validates first - confirm which order is intended.
  MAKELAZYOP(NSYM)
  // check input
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()==2) {
    if(s[0] != s[1])
      throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
    DataTypes::ShapeType ev_shape;
    ev_shape.push_back(s[0]);
    ev_shape.push_back(s[1]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->nonsymmetric(ev.m_data.get());
    return ev;
  }
  else if (getDataPointRank()==4) {
    if(!(s[0] == s[2] && s[1] == s[3]))
      throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
    DataTypes::ShapeType ev_shape;
    ev_shape.push_back(s[0]);
    ev_shape.push_back(s[1]);
    ev_shape.push_back(s[2]);
    ev_shape.push_back(s[3]);
    Data ev(0.,ev_shape,getFunctionSpace());
    ev.typeMatchRight(*this);
    m_data->nonsymmetric(ev.m_data.get());
    return ev;
  }
  else {
    throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
  }
}
1645
1646 Data
1647 Data::trace(int axis_offset) const
1648 {
1649 MAKELAZYOPOFF(TRACE,axis_offset)
1650 if ((axis_offset<0) || (axis_offset>getDataPointRank()))
1651 {
1652 throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
1653 }
1654 DataTypes::ShapeType s=getDataPointShape();
1655 if (getDataPointRank()==2) {
1656 DataTypes::ShapeType ev_shape;
1657 Data ev(0.,ev_shape,getFunctionSpace());
1658 ev.typeMatchRight(*this);
1659 m_data->trace(ev.m_data.get(), axis_offset);
1660 return ev;
1661 }
1662 if (getDataPointRank()==3) {
1663 DataTypes::ShapeType ev_shape;
1664 if (axis_offset==0) {
1665 int s2=s[2];
1666 ev_shape.push_back(s2);
1667 }
1668 else if (axis_offset==1) {
1669 int s0=s[0];
1670 ev_shape.push_back(s0);
1671 }
1672 Data ev(0.,ev_shape,getFunctionSpace());
1673 ev.typeMatchRight(*this);
1674 m_data->trace(ev.m_data.get(), axis_offset);
1675 return ev;
1676 }
1677 if (getDataPointRank()==4) {
1678 DataTypes::ShapeType ev_shape;
1679 if (axis_offset==0) {
1680 ev_shape.push_back(s[2]);
1681 ev_shape.push_back(s[3]);
1682 }
1683 else if (axis_offset==1) {
1684 ev_shape.push_back(s[0]);
1685 ev_shape.push_back(s[3]);
1686 }
1687 else if (axis_offset==2) {
1688 ev_shape.push_back(s[0]);
1689 ev_shape.push_back(s[1]);
1690 }
1691 Data ev(0.,ev_shape,getFunctionSpace());
1692 ev.typeMatchRight(*this);
1693 m_data->trace(ev.m_data.get(), axis_offset);
1694 return ev;
1695 }
1696 else {
1697 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1698 }
1699 }
1700
1701 Data
1702 Data::transpose(int axis_offset) const
1703 {
1704 MAKELAZYOPOFF(TRANS,axis_offset)
1705 DataTypes::ShapeType s=getDataPointShape();
1706 DataTypes::ShapeType ev_shape;
1707 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1708 // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1709 int rank=getDataPointRank();
1710 if (axis_offset<0 || axis_offset>rank) {
1711 throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank=" + rank);
1712 }
1713 for (int i=0; i<rank; i++) {
1714 int index = (axis_offset+i)%rank;
1715 ev_shape.push_back(s[index]); // Append to new shape
1716 }
1717 Data ev(0.,ev_shape,getFunctionSpace());
1718 ev.typeMatchRight(*this);
1719 m_data->transpose(ev.m_data.get(), axis_offset);
1720 return ev;
1721 }
1722
Data
Data::eigenvalues() const
{
  // Eigenvalues of each (square, rank-2) data point; the result is a
  // rank-1 object of length s[0]. Lazy data is resolved on a temporary.
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.eigenvalues();
  }
  // check input
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()!=2)
    throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
  if(s[0] != s[1])
    throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
  // create return
  DataTypes::ShapeType ev_shape(1,s[0]);
  Data ev(0.,ev_shape,getFunctionSpace());
  ev.typeMatchRight(*this);
  m_data->eigenvalues(ev.m_data.get());
  return ev;
}

const boost::python::tuple
Data::eigenvalues_and_eigenvectors(const double tol) const
{
  // As eigenvalues(), but also computes eigenvectors: returns a python
  // tuple (ev, V) with ev of shape (s[0],) and V of shape (s[0],s[0]).
  // tol is passed through to the underlying solver.
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    return temp.eigenvalues_and_eigenvectors(tol);
  }
  DataTypes::ShapeType s=getDataPointShape();
  if (getDataPointRank()!=2)
    throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
  if(s[0] != s[1])
    throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
  // create return
  DataTypes::ShapeType ev_shape(1,s[0]);
  Data ev(0.,ev_shape,getFunctionSpace());
  ev.typeMatchRight(*this);
  DataTypes::ShapeType V_shape(2,s[0]);
  Data V(0.,V_shape,getFunctionSpace());
  V.typeMatchRight(*this);
  m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
  return make_tuple(boost::python::object(ev),boost::python::object(V));
}
1770
1771 const boost::python::tuple
1772 Data::minGlobalDataPoint() const
1773 {
1774 // NB: calc_minGlobalDataPoint( had to be split off from minGlobalDataPoint( as boost::make_tuple causes an
1775 // abort (for unknown reasons) if there are openmp directives with it in the
1776 // surrounding function
1777
1778 int DataPointNo;
1779 int ProcNo;
1780 calc_minGlobalDataPoint(ProcNo,DataPointNo);
1781 return make_tuple(ProcNo,DataPointNo);
1782 }
1783
1784 void
1785 Data::calc_minGlobalDataPoint(int& ProcNo,
1786 int& DataPointNo) const
1787 {
1788 if (isLazy())
1789 {
1790 Data temp(*this); // to get around the fact that you can't resolve a const Data
1791 temp.resolve();
1792 return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
1793 }
1794 int i,j;
1795 int lowi=0,lowj=0;
1796 double min=numeric_limits<double>::max();
1797
1798 Data temp=minval();
1799
1800 int numSamples=temp.getNumSamples();
1801 int numDPPSample=temp.getNumDataPointsPerSample();
1802
1803 double next,local_min;
1804 int local_lowi=0,local_lowj=0;
1805
1806 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
1807 {
1808 local_min=min;
1809 #pragma omp for private(i,j) schedule(static)
1810 for (i=0; i<numSamples; i++) {
1811 for (j=0; j<numDPPSample; j++) {
1812 next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
1813 if (next<local_min) {
1814 local_min=next;
1815 local_lowi=i;
1816 local_lowj=j;
1817 }
1818 }
1819 }
1820 #pragma omp critical
1821 if (local_min<min) {
1822 min=local_min;
1823 lowi=local_lowi;
1824 lowj=local_lowj;
1825 }
1826 }
1827
1828 #ifdef PASO_MPI
1829 // determine the processor on which the minimum occurs
1830 next = temp.getDataPointRO(lowi,lowj);
1831 int lowProc = 0;
1832 double *globalMins = new double[get_MPISize()+1];
1833 int error;
1834 error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
1835
1836 if( get_MPIRank()==0 ){
1837 next = globalMins[lowProc];
1838 for( i=1; i<get_MPISize(); i++ )
1839 if( next>globalMins[i] ){
1840 lowProc = i;
1841 next = globalMins[i];
1842 }
1843 }
1844 MPI_Bcast( &lowProc, 1, MPI_DOUBLE, 0, get_MPIComm() );
1845
1846 delete [] globalMins;
1847 ProcNo = lowProc;
1848 #else
1849 ProcNo = 0;
1850 #endif
1851 DataPointNo = lowj + lowi * numDPPSample;
1852 }
1853
void
Data::saveDX(std::string fileName) const
{
  // Write this object to an OpenDX file by delegating to the domain,
  // which expects the data wrapped in a python dict under key "data".
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    temp.saveDX(fileName);
    return;
  }
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveDX(fileName,args);
  return;
}

void
Data::saveVTK(std::string fileName) const
{
  // Write this object to a VTK file; same delegation pattern as saveDX().
  if (isEmpty())
  {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  if (isLazy())
  {
    Data temp(*this); // to get around the fact that you can't resolve a const Data
    temp.resolve();
    temp.saveVTK(fileName);
    return;
  }
  boost::python::dict args;
  args["data"]=boost::python::object(this);
  getDomain()->saveVTK(fileName,args,"","");
  return;
}
1893
Data&
Data::operator+=(const Data& right)
{
  // In-place elementwise addition; protected objects may not be modified.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
  exclusiveWrite(); // Since Lazy data does not modify its leaves we only need to worry here
  binaryOp(right,plus<double>());
  return (*this);
}

Data&
Data::operator+=(const boost::python::object& right)
{
  // Python-object variant: promote `right` to a Data object on this
  // function space, then reuse the Data overload above.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)+=tmp;
  return *this;
}

// Hmmm, operator= makes a deep copy but the copy constructor does not?
Data&
Data::operator=(const Data& other)
{
  // Shallow assignment: shares the underlying DataAbstract; per the note
  // below, later mutations are caught by exclusiveWrite().
#if defined ASSIGNMENT_MEANS_DEEPCOPY
// This should not be used.
// Just leaving this here until I have completed transition
  copy(other);
#else
  m_protected=false;  // since any changes should be caught by exclusiveWrite();
//   m_data=other.m_data;
  set_m_data(other.m_data);
#endif
  return (*this);
}
1932
Data&
Data::operator-=(const Data& right)
{
  // In-place elementwise subtraction.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,SUB)
  exclusiveWrite();
  binaryOp(right,minus<double>());
  return (*this);
}

Data&
Data::operator-=(const boost::python::object& right)
{
  // Python-object variant: promote and delegate to the Data overload.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)-=tmp;
  return (*this);
}

Data&
Data::operator*=(const Data& right)
{
  // In-place elementwise multiplication.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,MUL)
  exclusiveWrite();
  binaryOp(right,multiplies<double>());
  return (*this);
}

Data&
Data::operator*=(const boost::python::object& right)
{
  // Python-object variant: promote and delegate to the Data overload.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)*=tmp;
  return (*this);
}

Data&
Data::operator/=(const Data& right)
{
  // In-place elementwise division.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  MAKELAZYBINSELF(right,DIV)
  exclusiveWrite();
  binaryOp(right,divides<double>());
  return (*this);
}

Data&
Data::operator/=(const boost::python::object& right)
{
  // Python-object variant: promote and delegate to the Data overload.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  Data tmp(right,getFunctionSpace(),false);
  (*this)/=tmp;
  return (*this);
}
2001
Data
Data::rpowO(const boost::python::object& left) const
{
  // Python __rpow__: computes left ** this. The python object is first
  // promoted to a Data object matching this one.
  Data left_d(left,*this);
  return left_d.powD(*this);
}

Data
Data::powO(const boost::python::object& right) const
{
  // Python __pow__ with a python exponent: promote and delegate to powD.
  Data tmp(right,getFunctionSpace(),false);
  return powD(tmp);
}

Data
Data::powD(const Data& right) const
{
  // Elementwise power this ** right (lazy shortcut via POW).
  MAKELAZYBIN(right,POW)
  return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
}
2022
//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const Data& left, const Data& right)
{
  // Data + Data: lazy shortcut, otherwise elementwise addition.
  MAKELAZYBIN2(left,right,ADD)
  return C_TensorBinaryOperation(left, right, plus<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const Data& left, const Data& right)
{
  // Data - Data: lazy shortcut, otherwise elementwise subtraction.
  MAKELAZYBIN2(left,right,SUB)
  return C_TensorBinaryOperation(left, right, minus<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const Data& left, const Data& right)
{
  // Data * Data: lazy shortcut, otherwise elementwise multiplication.
  MAKELAZYBIN2(left,right,MUL)
  return C_TensorBinaryOperation(left, right, multiplies<double>());
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const Data& left, const Data& right)
{
  // Data / Data: lazy shortcut, otherwise elementwise division.
  MAKELAZYBIN2(left,right,DIV)
  return C_TensorBinaryOperation(left, right, divides<double>());
}
2058
//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const Data& left, const boost::python::object& right)
{
  // Data + python object: promote the object to a Data object on left's
  // function space, then reuse the Data/Data operator.
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,ADD)
  return left+tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const Data& left, const boost::python::object& right)
{
  // Data - python object: promote, then reuse the Data/Data operator.
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,SUB)
  return left-tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const Data& left, const boost::python::object& right)
{
  // Data * python object: promote, then reuse the Data/Data operator.
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,MUL)
  return left*tmp;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const Data& left, const boost::python::object& right)
{
  // Data / python object: promote, then reuse the Data/Data operator.
  Data tmp(right,left.getFunctionSpace(),false);
  MAKELAZYBIN2(left,tmp,DIV)
  return left/tmp;
}
2098
//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator+(const boost::python::object& left, const Data& right)
{
  // python object + Data: promote the object to a Data object on right's
  // function space, then reuse the Data/Data operator.
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,ADD)
  return tmp+right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator-(const boost::python::object& left, const Data& right)
{
  // python object - Data: promote, then reuse the Data/Data operator.
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,SUB)
  return tmp-right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator*(const boost::python::object& left, const Data& right)
{
  // python object * Data: promote, then reuse the Data/Data operator.
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,MUL)
  return tmp*right;
}

//
// NOTE: It is essential to specify the namespace this operator belongs to
Data
escript::operator/(const boost::python::object& left, const Data& right)
{
  // python object / Data: promote, then reuse the Data/Data operator.
  Data tmp(left,right.getFunctionSpace(),false);
  MAKELAZYBIN2(tmp,right,DIV)
  return tmp/right;
}
2138
2139
/* TODO */
/* global reduction */
Data
Data::getItem(const boost::python::object& key) const
{
  // Python __getitem__: interpret `key` as a slice over the data-point
  // shape and return the corresponding sub-Data object.

  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);

  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }

  return getSlice(slice_region);
}
2154
/* TODO */
/* global reduction */
Data
Data::getSlice(const DataTypes::RegionType& region) const
{
  // Delegates to the region-slicing constructor.
  return Data(*this,region);
}
2162
/* TODO */
/* global reduction */
void
Data::setItemO(const boost::python::object& key,
               const boost::python::object& value)
{
  // Python __setitem__ with a python value: wrap the value in a Data
  // object on this function space and delegate to setItemD.
  Data tempData(value,getFunctionSpace());
  setItemD(key,tempData);
}

void
Data::setItemD(const boost::python::object& key,
               const Data& value)
{
  // Python __setitem__ with a Data value: write `value` into the slice of
  // the data-point shape described by `key`.
  DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
  if (slice_region.size()!=getDataPointRank()) {
    throw DataException("Error - slice size does not match Data rank.");
  }
  exclusiveWrite();
  // bring the value onto this function space first if needed
  // (presumably via interpolation in the Data constructor - TODO confirm)
  if (getFunctionSpace()!=value.getFunctionSpace()) {
    setSlice(Data(value,getFunctionSpace()),slice_region);
  } else {
    setSlice(value,slice_region);
  }
}
2188
void
Data::setSlice(const Data& value,
               const DataTypes::RegionType& region)
{
  // Overwrite `region` of the data points with `value`.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  forceResolve();
  exclusiveWrite(); // In case someone finds a way to call this without going through setItemD
  Data tempValue(value);
  // bring both operands to a compatible representation
  // (constant < tagged < expanded) before slicing
  typeMatchLeft(tempValue);
  typeMatchRight(tempValue);
  getReady()->setSlice(tempValue.m_data.get(),region);
}
2203
2204 void
2205 Data::typeMatchLeft(Data& right) const
2206 {
2207 if (right.isLazy() && !isLazy())
2208 {
2209 right.resolve();
2210 }
2211 if (isExpanded()){
2212 right.expand();
2213 } else if (isTagged()) {
2214 if (right.isConstant()) {
2215 right.tag();
2216 }
2217 }
2218 }
2219
2220 void
2221 Data::typeMatchRight(const Data& right)
2222 {
2223 if (isLazy() && !right.isLazy())
2224 {
2225 resolve();
2226 }
2227 if (isTagged()) {
2228 if (right.isExpanded()) {
2229 expand();
2230 }
2231 } else if (isConstant()) {
2232 if (right.isExpanded()) {
2233 expand();
2234 } else if (right.isTagged()) {
2235 tag();
2236 }
2237 }
2238 }
2239
2240 // The normal TaggedValue adds the tag if it is not already present
2241 // This form does not. It throws instead.
2242 // This is because the names are maintained by the domain and cannot be added
2243 // without knowing the tag number to map it to.
2244 void
2245 Data::setTaggedValueByName(std::string name,
2246 const boost::python::object& value)
2247 {
2248 if (getFunctionSpace().getDomain()->isValidTagName(name)) {
2249 forceResolve();
2250 exclusiveWrite();
2251 int tagKey=getFunctionSpace().getDomain()->getTag(name);
2252 setTaggedValue(tagKey,value);
2253 }
2254 else
2255 { // The
2256 throw DataException("Error - unknown tag in setTaggedValueByName.");
2257 }
2258 }
2259
void
Data::setTaggedValue(int tagKey,
                     const boost::python::object& value)
{
  // Assign `value` (a python scalar/sequence) to all data points carrying
  // tag `tagKey`.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // Ensure underlying data object is of type DataTagged
  forceResolve();
  exclusiveWrite();
  if (isConstant()) tag();
  WrappedArray w(value);

  // flatten the wrapped array into a DataVector
  DataVector temp_data2;
  temp_data2.copyFromArray(w,1);

  m_data->setTaggedValue(tagKey,w.getShape(), temp_data2);
}


void
Data::setTaggedValueFromCPP(int tagKey,
                  const DataTypes::ShapeType& pointshape,
                  const DataTypes::ValueType& value,
                  int dataOffset)
{
  // C++-side variant of setTaggedValue: the value is already a flat
  // DataTypes::ValueType starting at dataOffset with shape `pointshape`.
  if (isProtected()) {
    throw DataException("Error - attempt to update protected Data object.");
  }
  //
  // Ensure underlying data object is of type DataTagged
  forceResolve();
  // NOTE(review): here tag() runs before exclusiveWrite(), while
  // setTaggedValue() does the opposite - confirm which order is intended.
  if (isConstant()) tag();
  exclusiveWrite();
  //
  // Call DataAbstract::setTaggedValue
  m_data->setTaggedValue(tagKey,pointshape, value, dataOffset);
}
2299
2300 int
2301 Data::getTagNumber(int dpno)
2302 {
2303 if (isEmpty())
2304 {
2305 throw DataException("Error - operation not permitted on instances of DataEmpty.");
2306 }
2307 return getFunctionSpace().getTagFromDataPointNo(dpno);
2308 }
2309
2310
2311 ostream& escript::operator<<(ostream& o, const Data& data)
2312 {
2313 o << data.toString();
2314 return o;
2315 }
2316
// General tensor product:  res(SL x SR) = arg_0(SL x SM) * arg_1(SM x SR)
// SM is the product of the last axis_offset entries in arg_0.getShape().
// The nine branches below cover every combination of storage types
// (constant/tagged/expanded) for the two (function-space matched) inputs;
// each branch prepares raw pointers into the operands and the result and
// calls matrix_matrix_product on them.
Data
escript::C_GeneralTensorProduct(Data&amp; arg_0,
                     Data&amp; arg_1,
                     int axis_offset,
                     int transpose)
{
  // If either input is lazy (or AUTOLAZY is on and an input is expanded),
  // defer the whole product as a lazy PROD node instead of computing now.
  if (arg_0.isLazy() || arg_1.isLazy() || (AUTOLAZYON &amp;&amp; (arg_0.isExpanded() || arg_1.isExpanded())))
  {
	DataLazy* c=new DataLazy(arg_0.borrowDataPtr(), arg_1.borrowDataPtr(), PROD, axis_offset,transpose);
	return Data(c);
  }

  // Interpolate if necessary and find an appropriate function space
  Data arg_0_Z, arg_1_Z;
  if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
    if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
      arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
      arg_1_Z = Data(arg_1);
    }
    else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
      arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
      arg_0_Z =Data(arg_0);
    }
    else {
      throw DataException("Error - C_GeneralTensorProduct: arguments have incompatible function spaces.");
    }
  } else {
      arg_0_Z = Data(arg_0);
      arg_1_Z = Data(arg_1);
  }
  // Get rank and shape of inputs
  int rank0 = arg_0_Z.getDataPointRank();
  int rank1 = arg_1_Z.getDataPointRank();
  const DataTypes::ShapeType&amp; shape0 = arg_0_Z.getDataPointShape();
  const DataTypes::ShapeType&amp; shape1 = arg_1_Z.getDataPointShape();

  // Prepare for the loops of the product and verify compatibility of shapes.
  // transpose selects which operand's axes are rotated: 0 = neither,
  // 1 = arg_0, 2 = arg_1.
  int start0=0, start1=0;
  if (transpose == 0)           {}
  else if (transpose == 1)      { start0 = axis_offset; }
  else if (transpose == 2)      { start1 = rank1-axis_offset; }
  else                          { throw DataException("C_GeneralTensorProduct: Error - transpose should be 0, 1 or 2"); }

  // Adjust the shapes for transpose (cyclic rotation by start0/start1)
  DataTypes::ShapeType tmpShape0(rank0);        // pre-sizing the vectors rather
  DataTypes::ShapeType tmpShape1(rank1);        // than using push_back
  for (int i=0; i&lt;rank0; i++)   { tmpShape0[i]=shape0[(i+start0)%rank0]; }
  for (int i=0; i&lt;rank1; i++)   { tmpShape1[i]=shape1[(i+start1)%rank1]; }

#if 0
  // For debugging: show shape after transpose
  char tmp[100];
  std::string shapeStr;
  shapeStr = "(";
  for (int i=0; i&lt;rank0; i++)   { sprintf(tmp, "%d,", tmpShape0[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout &lt;&lt; "C_GeneralTensorProduct: Shape of arg0 is " &lt;&lt; shapeStr &lt;&lt; endl;
  shapeStr = "(";
  for (int i=0; i&lt;rank1; i++)   { sprintf(tmp, "%d,", tmpShape1[i]); shapeStr += tmp; }
  shapeStr += ")";
  cout &lt;&lt; "C_GeneralTensorProduct: Shape of arg1 is " &lt;&lt; shapeStr &lt;&lt; endl;
#endif

  // Prepare for the loops of the product:
  // SL = size of the leading (kept) part of arg_0,
  // SM = size of the summed-over middle part (must match between inputs),
  // SR = size of the trailing (kept) part of arg_1.
  int SL=1, SM=1, SR=1;
  for (int i=0; i&lt;rank0-axis_offset; i++)       {
    SL *= tmpShape0[i];
  }
  for (int i=rank0-axis_offset; i&lt;rank0; i++)   {
    if (tmpShape0[i] != tmpShape1[i-(rank0-axis_offset)]) {
      throw DataException("C_GeneralTensorProduct: Error - incompatible shapes");
    }
    SM *= tmpShape0[i];
  }
  for (int i=axis_offset; i&lt;rank1; i++)         {
    SR *= tmpShape1[i];
  }

  // Define the shape of the output (rank of shape is the sum of the loop ranges below)
  DataTypes::ShapeType shape2(rank0+rank1-2*axis_offset);
  {                     // block to limit the scope of out_index
     int out_index=0;
     for (int i=0; i&lt;rank0-axis_offset; i++, ++out_index) { shape2[out_index]=tmpShape0[i]; } // First part of arg_0_Z
     for (int i=axis_offset; i&lt;rank1; i++, ++out_index)   { shape2[out_index]=tmpShape1[i]; } // Last part of arg_1_Z
  }

  if (shape2.size()&gt;ESCRIPT_MAX_DATA_RANK)
  {
     ostringstream os;
     os &lt;&lt; "C_GeneralTensorProduct: Error - Attempt to create a rank " &lt;&lt; shape2.size() &lt;&lt; " object. The maximum rank is " &lt;&lt; ESCRIPT_MAX_DATA_RANK &lt;&lt; ".";
     throw DataException(os.str());
  }

  // Declare output Data object
  Data res;

  // constant x constant -&gt; constant: single product of the two point values
  if (arg_0_Z.isConstant()   &amp;&amp; arg_1_Z.isConstant()) {
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());        // DataConstant output
    const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(0));
    const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(0));
    double *ptr_2 = &amp;(res.getDataAtOffsetRW(0));
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
  }
  // constant x tagged -&gt; tagged: one product for the default value plus
  // one per tag of arg_1
  else if (arg_0_Z.isConstant()   &amp;&amp; arg_1_Z.isTagged()) {

    // Prepare the DataConstant input
    DataConstant* tmp_0=dynamic_cast&lt;DataConstant*&gt;(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast&lt;DataTagged*&gt;(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP_1 Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());        // DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast&lt;DataTagged*&gt;(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_0 = tmp_0-&gt;getPointOffset(0,0);
    const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(offset_0));

    const double *ptr_1 = &amp;(tmp_1-&gt;getDefaultValueRO(0));
    double *ptr_2 = &amp;(tmp_2-&gt;getDefaultValueRW(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType&amp; lookup_1=tmp_1-&gt;getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i-&gt;first is a tag, i-&gt;second is an offset into memory
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2-&gt;addTag(i-&gt;first);

      const double *ptr_1 = &amp;(tmp_1-&gt;getDataByTagRO(i-&gt;first,0));
      double *ptr_2 = &amp;(tmp_2-&gt;getDataByTagRW(i-&gt;first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  // constant x expanded -&gt; expanded: one product per data point of arg_1
  else if (arg_0_Z.isConstant()   &amp;&amp; arg_1_Z.isExpanded()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataConstant* tmp_0=dynamic_cast&lt;DataConstant*&gt;(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast&lt;DataExpanded*&gt;(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast&lt;DataExpanded*&gt;(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_1,dataPointNo_1;
    int numSamples_1 = arg_1_Z.getNumSamples();
    int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
    int offset_0 = tmp_0-&gt;getPointOffset(0,0);
    #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
    for (sampleNo_1 = 0; sampleNo_1 &lt; numSamples_1; sampleNo_1++) {
      for (dataPointNo_1 = 0; dataPointNo_1 &lt; numDataPointsPerSample_1; dataPointNo_1++) {
        int offset_1 = tmp_1-&gt;getPointOffset(sampleNo_1,dataPointNo_1);
        int offset_2 = tmp_2-&gt;getPointOffset(sampleNo_1,dataPointNo_1);
        const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(offset_0));
        const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &amp;(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  // tagged x constant -&gt; tagged: mirror of the constant x tagged case
  else if (arg_0_Z.isTagged()     &amp;&amp; arg_1_Z.isConstant()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast&lt;DataTagged*&gt;(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP_0 Programming error - casting to DataTagged."); }

    // Prepare the DataConstant input
    DataConstant* tmp_1=dynamic_cast&lt;DataConstant*&gt;(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_0_Z.getFunctionSpace());        // DataTagged output
    res.tag();
    DataTagged* tmp_2=dynamic_cast&lt;DataTagged*&gt;(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare offset into DataConstant
    int offset_1 = tmp_1-&gt;getPointOffset(0,0);
    const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(offset_1));
    const double *ptr_0 = &amp;(tmp_0-&gt;getDefaultValueRO(0));
    double *ptr_2 = &amp;(tmp_2-&gt;getDefaultValueRW(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Compute an MVP for each tag
    const DataTagged::DataMapType&amp; lookup_0=tmp_0-&gt;getTagLookup();
    DataTagged::DataMapType::const_iterator i; // i-&gt;first is a tag, i-&gt;second is an offset into memory
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {

      tmp_2-&gt;addTag(i-&gt;first);
      const double *ptr_0 = &amp;(tmp_0-&gt;getDataByTagRO(i-&gt;first,0));
      double *ptr_2 = &amp;(tmp_2-&gt;getDataByTagRW(i-&gt;first,0));
      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  // tagged x tagged -&gt; tagged: the output carries the union of both tag
  // sets; each merged tag gets its own product
  else if (arg_0_Z.isTagged()     &amp;&amp; arg_1_Z.isTagged()) {

    // Borrow DataTagged input from Data object
    DataTagged* tmp_0=dynamic_cast&lt;DataTagged*&gt;(arg_0_Z.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Borrow DataTagged input from Data object
    DataTagged* tmp_1=dynamic_cast&lt;DataTagged*&gt;(arg_1_Z.borrowData());
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    // Prepare a DataTagged output 2
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace());
    res.tag();  // DataTagged output
    DataTagged* tmp_2=dynamic_cast&lt;DataTagged*&gt;(res.borrowData());
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataTagged."); }

    const double *ptr_0 = &amp;(tmp_0-&gt;getDefaultValueRO(0));
    const double *ptr_1 = &amp;(tmp_1-&gt;getDefaultValueRO(0));
    double *ptr_2 = &amp;(tmp_2-&gt;getDefaultValueRW(0));

    // Compute an MVP for the default
    matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    // Merge the tags
    DataTagged::DataMapType::const_iterator i; // i-&gt;first is a tag, i-&gt;second is an offset into memory
    const DataTagged::DataMapType&amp; lookup_0=tmp_0-&gt;getTagLookup();
    const DataTagged::DataMapType&amp; lookup_1=tmp_1-&gt;getTagLookup();
    for (i=lookup_0.begin();i!=lookup_0.end();i++) {
      tmp_2-&gt;addTag(i-&gt;first); // use tmp_2 to get correct shape
    }
    for (i=lookup_1.begin();i!=lookup_1.end();i++) {
      tmp_2-&gt;addTag(i-&gt;first);
    }
    // Compute an MVP for each tag; a missing tag in an input falls back
    // to that input's default value inside getDataByTagRO
    const DataTagged::DataMapType&amp; lookup_2=tmp_2-&gt;getTagLookup();
    for (i=lookup_2.begin();i!=lookup_2.end();i++) {
      const double *ptr_0 = &amp;(tmp_0-&gt;getDataByTagRO(i-&gt;first,0));
      const double *ptr_1 = &amp;(tmp_1-&gt;getDataByTagRO(i-&gt;first,0));
      double *ptr_2 = &amp;(tmp_2-&gt;getDataByTagRW(i-&gt;first,0));

      matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
    }

  }
  // tagged x expanded -&gt; expanded
  else if (arg_0_Z.isTagged()     &amp;&amp; arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataTagged*   tmp_0=dynamic_cast&lt;DataTagged*&gt;(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast&lt;DataExpanded*&gt;(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast&lt;DataExpanded*&gt;(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 &lt; numSamples_0; sampleNo_0++) {
      int offset_0 = tmp_0-&gt;getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
      const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(offset_0));
      for (dataPointNo_0 = 0; dataPointNo_0 &lt; numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_1 = tmp_1-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &amp;(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  // expanded x constant -&gt; expanded
  else if (arg_0_Z.isExpanded()   &amp;&amp; arg_1_Z.isConstant()) {

    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast&lt;DataExpanded*&gt;(arg_0_Z.borrowData());
    DataConstant* tmp_1=dynamic_cast&lt;DataConstant*&gt;(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast&lt;DataExpanded*&gt;(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataConstant."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    int offset_1 = tmp_1-&gt;getPointOffset(0,0);
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 &lt; numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 &lt; numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(offset_0));
        const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &amp;(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }


  }
  // expanded x tagged -&gt; expanded
  else if (arg_0_Z.isExpanded()   &amp;&amp; arg_1_Z.isTagged()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast&lt;DataExpanded*&gt;(arg_0_Z.borrowData());
    DataTagged*   tmp_1=dynamic_cast&lt;DataTagged*&gt;(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast&lt;DataExpanded*&gt;(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataTagged."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 &lt; numSamples_0; sampleNo_0++) {
      int offset_1 = tmp_1-&gt;getPointOffset(sampleNo_0,0);
      const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(offset_1));
      for (dataPointNo_0 = 0; dataPointNo_0 &lt; numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(offset_0));
        double *ptr_2 = &amp;(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  // expanded x expanded -&gt; expanded: one product per data point
  else if (arg_0_Z.isExpanded()   &amp;&amp; arg_1_Z.isExpanded()) {

    // After finding a common function space above the two inputs have the same numSamples and num DPPS
    res = Data(0.0, shape2, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
    DataExpanded* tmp_0=dynamic_cast&lt;DataExpanded*&gt;(arg_0_Z.borrowData());
    DataExpanded* tmp_1=dynamic_cast&lt;DataExpanded*&gt;(arg_1_Z.borrowData());
    DataExpanded* tmp_2=dynamic_cast&lt;DataExpanded*&gt;(res.borrowData());
    if (tmp_0==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_1==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    if (tmp_2==0) { throw DataException("GTP Programming error - casting to DataExpanded."); }
    int sampleNo_0,dataPointNo_0;
    int numSamples_0 = arg_0_Z.getNumSamples();
    int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
    #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
    for (sampleNo_0 = 0; sampleNo_0 &lt; numSamples_0; sampleNo_0++) {
      for (dataPointNo_0 = 0; dataPointNo_0 &lt; numDataPointsPerSample_0; dataPointNo_0++) {
        int offset_0 = tmp_0-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_1 = tmp_1-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        int offset_2 = tmp_2-&gt;getPointOffset(sampleNo_0,dataPointNo_0);
        const double *ptr_0 = &amp;(arg_0_Z.getDataAtOffsetRO(offset_0));
        const double *ptr_1 = &amp;(arg_1_Z.getDataAtOffsetRO(offset_1));
        double *ptr_2 = &amp;(res.getDataAtOffsetRW(offset_2));
        matrix_matrix_product(SL, SM, SR, ptr_0, ptr_1, ptr_2, transpose);
      }
    }

  }
  else {
    throw DataException("Error - C_GeneralTensorProduct: unknown combination of inputs");
  }

  return res;
}
2684
2685 DataAbstract*
2686 Data::borrowData() const
2687 {
2688 return m_data.get();
2689 }
2690
2691 // Not all that happy about returning a non-const from a const
2692 DataAbstract_ptr
2693 Data::borrowDataPtr() const
2694 {
2695 return m_data;
2696 }
2697
2698 // Not all that happy about returning a non-const from a const
2699 DataReady_ptr
2700 Data::borrowReadyPtr() const
2701 {
2702 DataReady_ptr dr=dynamic_pointer_cast<DataReady>(m_data);
2703 EsysAssert((dr!=0), "Error - casting to DataReady.");
2704 return dr;
2705 }
2706
2707 std::string
2708 Data::toString() const
2709 {
2710 if (!m_data->isEmpty() &&
2711 !m_data->isLazy() &&
2712 getLength()>escriptParams.getInt("TOO_MANY_LINES"))
2713 {
2714 stringstream temp;
2715 temp << "Summary: inf="<< inf_const() << " sup=" << sup_const() << " data points=" << getNumDataPoints();
2716 return temp.str();
2717 }
2718 return m_data->toString();
2719 }
2720
2721
2722 // This method is not thread-safe
2723 DataTypes::ValueType::reference
2724 Data::getDataAtOffsetRW(DataTypes::ValueType::size_type i)
2725 {
2726 checkExclusiveWrite();
2727 return getReady()->getDataAtOffsetRW(i);
2728 }
2729
2730 // This method is not thread-safe
2731 DataTypes::ValueType::const_reference
2732 Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i)
2733 {
2734 forceResolve();
2735 return getReady()->getDataAtOffsetRO(i);
2736 }
2737
2738
2739 // DataTypes::ValueType::const_reference
2740 // Data::getDataAtOffsetRO(DataTypes::ValueType::size_type i) const
2741 // {
2742 // if (isLazy())
2743 // {
2744 // throw DataException("Programmer error - getDataAtOffsetRO() not permitted on Lazy Data (object is const which prevents resolving).");
2745 // }
2746 // return getReady()->getDataAtOffsetRO(i);
2747 // }
2748
2749
2750 DataTypes::ValueType::const_reference
2751 Data::getDataPointRO(int sampleNo, int dataPointNo)
2752 {
2753 forceResolve();
2754 if (!isReady())
2755 {
2756 throw DataException("Programmer error -getDataPointRO() not permitted on Lazy Data.");
2757 }
2758 else
2759 {
2760 const DataReady* dr=getReady();
2761 return dr->getDataAtOffsetRO(dr->getPointOffset(sampleNo, dataPointNo));
2762 }
2763 }
2764
2765
2766
2767
2768 DataTypes::ValueType::reference
2769 Data::getDataPointRW(int sampleNo, int dataPointNo)
2770 {
2771 checkExclusiveWrite();
2772 DataReady* dr=getReady();
2773 return dr->getDataAtOffsetRW(dr->getPointOffset(sampleNo, dataPointNo));
2774 }
2775
2776 BufferGroup*
2777 Data::allocSampleBuffer() const
2778 {
2779 if (isLazy())
2780 {
2781 #ifdef _OPENMP
2782 int tnum=omp_get_max_threads();
2783 #else
2784 int tnum=1;
2785 #endif
2786 return new BufferGroup(getSampleBufferSize(),tnum);
2787 }
2788 else
2789 {
2790 return NULL;
2791 }
2792 }
2793
2794 void
2795 Data::freeSampleBuffer(BufferGroup* bufferg)
2796 {
2797 if (bufferg!=0)
2798 {
2799 delete bufferg;
2800 }
2801 }
2802
2803
2804 /* Member functions specific to the MPI implementation */
2805
2806 void
2807 Data::print()
2808 {
2809 int i,j;
2810
2811 printf( "Data is %dX%d\n", getNumSamples(), getNumDataPointsPerSample() );
2812 for( i=0; i<getNumSamples(); i++ )
2813 {
2814 printf( "[%6d]", i );
2815 for( j=0; j<getNumDataPointsPerSample(); j++ )
2816 printf( "\t%10.7g", (getSampleDataRW(i))[j] ); // doesn't really need RW access
2817 printf( "\n" );
2818 }
2819 }
2820 void
2821 Data::dump(const std::string fileName) const
2822 {
2823 try
2824 {
2825 if (isLazy())
2826 {
2827 Data temp(*this); // this is to get a non-const object which we can resolve
2828 temp.resolve();
2829 temp.dump(fileName);
2830 }
2831 else
2832 {
2833 return m_data->dump(fileName);
2834 }
2835 }
2836 catch (std::exception& e)
2837 {
2838 cout << e.what() << endl;
2839 }
2840 }
2841
2842 int
2843 Data::get_MPISize() const
2844 {
2845 int size;
2846 #ifdef PASO_MPI
2847 int error;
2848 error = MPI_Comm_size( get_MPIComm(), &size );
2849 #else
2850 size = 1;
2851 #endif
2852 return size;
2853 }
2854
2855 int
2856 Data::get_MPIRank() const
2857 {
2858 int rank;
2859 #ifdef PASO_MPI
2860 int error;
2861 error = MPI_Comm_rank( get_MPIComm(), &rank );
2862 #else
2863 rank = 0;
2864 #endif
2865 return rank;
2866 }
2867
// Return the communicator associated with this object: MPI_COMM_WORLD
// under PASO_MPI, otherwise -1 as a placeholder (presumably MPI_Comm is
// a dummy integer typedef in serial builds — confirm against the
// serial MPI stub header).
MPI_Comm
Data::get_MPIComm() const
{
#ifdef PASO_MPI
	return MPI_COMM_WORLD;
#else
	return -1;
#endif
}
2877
2878

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26