
Annotation of /trunk/escript/src/Data.cpp



Revision 2724
Sun Oct 18 23:50:18 2009 UTC by jfenwick
File size: 96629 bytes
And again

1 jgs 480
2 ksteube 1312 /*******************************************************
3 ksteube 1811 *
4 jfenwick 2548 * Copyright (c) 2003-2009 by University of Queensland
5 ksteube 1811 * Earth Systems Science Computational Center (ESSCC)
6     * http://www.uq.edu.au/esscc
7     *
8     * Primary Business: Queensland, Australia
9     * Licensed under the Open Software License version 3.0
10     * http://www.opensource.org/licenses/osl-3.0.php
11     *
12     *******************************************************/
13 ksteube 1312
14 ksteube 1811
15 jgs 474 #include "Data.h"
16 jgs 94
17 jgs 480 #include "DataExpanded.h"
18     #include "DataConstant.h"
19     #include "DataTagged.h"
20     #include "DataEmpty.h"
21 jfenwick 2005 #include "DataLazy.h"
22 jgs 480 #include "FunctionSpaceFactory.h"
23     #include "AbstractContinuousDomain.h"
24     #include "UnaryFuncs.h"
25 jfenwick 1796 #include "FunctionSpaceException.h"
26 jfenwick 1897 #include "EscriptParams.h"
27 jfenwick 1796
28 ksteube 1312 extern "C" {
29 phornby 2078 #include "esysUtils/blocktimer.h"
30 ksteube 1312 }
31 jgs 480
32 jgs 119 #include <fstream>
33 jgs 94 #include <algorithm>
34     #include <vector>
35     #include <functional>
36 jfenwick 2086 #include <sstream> // so we can throw messages about ranks
37 jgs 94
38 jgs 153 #include <boost/python/dict.hpp>
39 jgs 94 #include <boost/python/extract.hpp>
40     #include <boost/python/long.hpp>
41 jfenwick 2271 #include "WrappedArray.h"
42 jgs 94
43     using namespace std;
44     using namespace boost::python;
45     using namespace boost;
46     using namespace escript;
47    
48 jfenwick 2005 // Ensure the current object is not a DataLazy.
49     // The idea was that we could add an optional warning whenever a resolve is forced.
50 jfenwick 2271 // #define forceResolve() if (isLazy()) {resolve();}
51    
52 jfenwick 2146 #define AUTOLAZYON escriptParams.getAUTOLAZY()
53     #define MAKELAZYOP(X) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
54     {\
55     DataLazy* c=new DataLazy(borrowDataPtr(),X);\
56     return Data(c);\
57     }
58     #define MAKELAZYOPOFF(X,Y) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
59     {\
60     DataLazy* c=new DataLazy(borrowDataPtr(),X,Y);\
61     return Data(c);\
62     }
63 jfenwick 2005
64 jfenwick 2496 #define MAKELAZYOP2(X,Y,Z) if (isLazy() || (AUTOLAZYON && m_data->isExpanded())) \
65     {\
66     DataLazy* c=new DataLazy(borrowDataPtr(),X,Y,Z);\
67     return Data(c);\
68     }
69    
70 jfenwick 2146 #define MAKELAZYBINSELF(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
71     {\
72     DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
73 jfenwick 2271 /* m_data=c->getPtr();*/ set_m_data(c->getPtr());\
74 jfenwick 2146 return (*this);\
75     }
76    
77     // Like the above but returns a new Data rather than *this
78     #define MAKELAZYBIN(R,X) if (isLazy() || R.isLazy() || (AUTOLAZYON && (isExpanded() || R.isExpanded()))) \
79     {\
80     DataLazy* c=new DataLazy(m_data,R.borrowDataPtr(),X);\
81     return Data(c);\
82     }
83    
84     #define MAKELAZYBIN2(L,R,X) if (L.isLazy() || R.isLazy() || (AUTOLAZYON && (L.isExpanded() || R.isExpanded()))) \
85     {\
86     DataLazy* c=new DataLazy(L.borrowDataPtr(),R.borrowDataPtr(),X);\
87     return Data(c);\
88     }
89    
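// Illustration (a rough expansion sketch, not extra code): with AUTOLAZY enabled, a unary
// method such as Data::sin() below begins with MAKELAZYOP(SIN), which expands to roughly
//
//   if (isLazy() || (AUTOLAZYON && m_data->isExpanded()))
//   {
//       DataLazy* c=new DataLazy(borrowDataPtr(),SIN);
//       return Data(c);
//   }
//
// so the operation is recorded as a DataLazy node and evaluation is deferred until a
// resolve. The BINSELF/BIN/BIN2 variants do the same for binary operations, pulling the
// right-hand operand into the lazy expression as well.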
90 jfenwick 2721 #define CHECK_DO_CRES escriptParams.getRESOLVE_COLLECTIVE()
91    
92 jfenwick 2271 namespace
93     {
94    
95 jfenwick 2459 template <class ARR>
96     inline
97     boost::python::tuple
98     pointToTuple1(const DataTypes::ShapeType& shape, ARR v, unsigned long offset)
99     {
100     using namespace boost::python;
101     using boost::python::tuple;
102     using boost::python::list;
103    
104     list l;
105     unsigned int dim=shape[0];
106     for (size_t i=0;i<dim;++i)
107     {
108     l.append(v[i+offset]);
109     }
110     return tuple(l);
111     }
112    
113     template <class ARR>
114     inline
115     boost::python::tuple
116     pointToTuple2(const DataTypes::ShapeType& shape, ARR v, unsigned long offset)
117     {
118     using namespace boost::python;
119     using boost::python::tuple;
120     using boost::python::list;
121    
122     unsigned int shape0=shape[0];
123     unsigned int shape1=shape[1];
124     list lj;
125     for (size_t j=0;j<shape0;++j)
126     {
127     list li;
128     for (size_t i=0;i<shape1;++i)
129     {
130     li.append(v[offset+DataTypes::getRelIndex(shape,j,i)]);
131     }
132     lj.append(tuple(li));
133     }
134     return tuple(lj);
135     }
136    
137     template <class ARR>
138     inline
139     boost::python::tuple
140     pointToTuple3(const DataTypes::ShapeType& shape, ARR v, unsigned long offset)
141     {
142     using namespace boost::python;
143     using boost::python::tuple;
144     using boost::python::list;
145    
146     unsigned int shape0=shape[0];
147     unsigned int shape1=shape[1];
148     unsigned int shape2=shape[2];
149    
150     list lk;
151     for (size_t k=0;k<shape0;++k)
152     {
153     list lj;
154     for (size_t j=0;j<shape1;++j)
155     {
156     list li;
157     for (size_t i=0;i<shape2;++i)
158     {
159 jfenwick 2482 li.append(v[offset+DataTypes::getRelIndex(shape,k,j,i)]);
160 jfenwick 2459 }
161     lj.append(tuple(li));
162     }
163     lk.append(tuple(lj));
164     }
165     return tuple(lk);
166     }
167    
168     template <class ARR>
169     inline
170     boost::python::tuple
171     pointToTuple4(const DataTypes::ShapeType& shape, ARR v, unsigned long offset)
172     {
173     using namespace boost::python;
174     using boost::python::tuple;
175     using boost::python::list;
176    
177     unsigned int shape0=shape[0];
178     unsigned int shape1=shape[1];
179     unsigned int shape2=shape[2];
180     unsigned int shape3=shape[3];
181    
182     list ll;
183     for (size_t l=0;l<shape0;++l)
184     {
185     list lk;
186     for (size_t k=0;k<shape1;++k)
187     {
188     list lj;
189     for (size_t j=0;j<shape2;++j)
190     {
191     list li;
192     for (size_t i=0;i<shape3;++i)
193     {
194 jfenwick 2482 li.append(v[offset+DataTypes::getRelIndex(shape,l,k,j,i)]);
195 jfenwick 2459 }
196     lj.append(tuple(li));
197     }
198     lk.append(tuple(lj));
199     }
200     ll.append(tuple(lk));
201     }
202     return tuple(ll);
203     }
204    
205    
206 jfenwick 2271 // This should be safer once the DataC RO changes have been brought in
207     template <class ARR>
208     boost::python::tuple
209     pointToTuple( const DataTypes::ShapeType& shape,ARR v)
210     {
211     using namespace boost::python;
212     using boost::python::list;
213     int rank=shape.size();
214     if (rank==0)
215     {
216     return make_tuple(v[0]);
217     }
218     else if (rank==1)
219     {
220 jfenwick 2459 return pointToTuple1(shape,v,0);
221 jfenwick 2271 }
222     else if (rank==2)
223     {
224 jfenwick 2459 return pointToTuple2(shape,v,0);
225 jfenwick 2271 }
226     else if (rank==3)
227     {
228 jfenwick 2459 return pointToTuple3(shape,v,0);
229 jfenwick 2271 }
230     else if (rank==4)
231     {
232 jfenwick 2459 return pointToTuple4(shape,v,0);
233 jfenwick 2271 }
234     else
235     throw DataException("Unknown rank in pointToTuple.");
236     }
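// Worked sketch (illustrative only): for a rank-2 point with shape (2,3), pointToTuple
// dispatches to pointToTuple2, which builds shape[0]=2 inner tuples of shape[1]=3 values,
// fetching each element through getRelIndex:
//
//   DataTypes::ShapeType shape;
//   shape.push_back(2); shape.push_back(3);
//   boost::python::tuple t=pointToTuple(shape,v);  // ((v00,v01,v02),(v10,v11,v12))
//
// where vjk denotes v[DataTypes::getRelIndex(shape,j,k)] (plus the offset in the
// pointToTupleN helpers).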
237    
238     } // anonymous namespace
239    
240 jgs 94 Data::Data()
241 jfenwick 2271 : m_shared(false), m_lazy(false)
242 jgs 94 {
243     //
244     // Default data is type DataEmpty
245     DataAbstract* temp=new DataEmpty();
246 jfenwick 2271 // m_data=temp->getPtr();
247     set_m_data(temp->getPtr());
248 gross 783 m_protected=false;
249 jgs 94 }
250    
251     Data::Data(double value,
252     const tuple& shape,
253     const FunctionSpace& what,
254     bool expanded)
255 jfenwick 2271 : m_shared(false), m_lazy(false)
256 jgs 94 {
257 jfenwick 1796 DataTypes::ShapeType dataPointShape;
258 jgs 94 for (int i = 0; i < shape.attr("__len__")(); ++i) {
259     dataPointShape.push_back(extract<const int>(shape[i]));
260     }
261 matt 1319
262 jfenwick 1796 int len = DataTypes::noValues(dataPointShape);
263 matt 1319 DataVector temp_data(len,value,len);
264 jfenwick 1796 initialise(temp_data, dataPointShape, what, expanded);
265 gross 783 m_protected=false;
266 jgs 94 }
267    
268     Data::Data(double value,
269 jfenwick 1796 const DataTypes::ShapeType& dataPointShape,
270 jgs 94 const FunctionSpace& what,
271     bool expanded)
272 jfenwick 2271 : m_shared(false), m_lazy(false)
273 jgs 94 {
274 jfenwick 1796 int len = DataTypes::noValues(dataPointShape);
275 matt 1319 DataVector temp_data(len,value,len);
276 jfenwick 1796 initialise(temp_data, dataPointShape, what, expanded);
277 gross 783 m_protected=false;
278 jgs 94 }
279    
280 jgs 102 Data::Data(const Data& inData)
281 jfenwick 2271 : m_shared(false), m_lazy(false)
282 jgs 94 {
283 jfenwick 2271 set_m_data(inData.m_data);
284 gross 783 m_protected=inData.isProtected();
285 jgs 94 }
286    
287 jfenwick 1796
288 jgs 94 Data::Data(const Data& inData,
289 jfenwick 1796 const DataTypes::RegionType& region)
290 jfenwick 2271 : m_shared(false), m_lazy(false)
291 jgs 94 {
292 jfenwick 2005 DataAbstract_ptr dat=inData.m_data;
293     if (inData.isLazy())
294     {
295     dat=inData.m_data->resolve();
296     }
297     else
298     {
299     dat=inData.m_data;
300     }
301 jgs 94 //
302 jgs 102 // Create Data which is a slice of another Data
303 jfenwick 2005 DataAbstract* tmp = dat->getSlice(region);
304 jfenwick 2271 set_m_data(DataAbstract_ptr(tmp));
305 gross 783 m_protected=false;
306 jfenwick 2271
307 jgs 94 }
308    
309     Data::Data(const Data& inData,
310     const FunctionSpace& functionspace)
311 jfenwick 2271 : m_shared(false), m_lazy(false)
312 jgs 94 {
313 jfenwick 1803 if (inData.isEmpty())
314     {
315     throw DataException("Error - will not interpolate for instances of DataEmpty.");
316     }
317 jgs 94 if (inData.getFunctionSpace()==functionspace) {
318 jfenwick 2271 set_m_data(inData.m_data);
319 jfenwick 2005 }
320     else
321     {
322    
323     if (inData.isConstant()) { // for a constant function, we just need to use the new function space
324     if (!inData.probeInterpolation(functionspace))
325     { // Even though this is constant, we still need to check whether interpolation is allowed
326 jfenwick 2087 throw FunctionSpaceException("Cannot interpolate across to the domain of the specified FunctionSpace. (DataConstant)");
327 jfenwick 2005 }
328     // if the data is not lazy, this will just be a cast to DataReady
329     DataReady_ptr dr=inData.m_data->resolve();
330 jfenwick 2271 DataConstant* dc=new DataConstant(functionspace,inData.m_data->getShape(),dr->getVectorRO());
331     // m_data=DataAbstract_ptr(dc);
332     set_m_data(DataAbstract_ptr(dc));
333 jgs 94 } else {
334 jfenwick 2005 Data tmp(0,inData.getDataPointShape(),functionspace,true);
335     // Note: Must use a reference or pointer to a derived object
336     // in order to get polymorphic behaviour. Shouldn't really
337     // be able to create an instance of AbstractDomain but that was done
338     // as a boost::python workaround which may no longer be required.
339     /*const AbstractDomain& inDataDomain=inData.getDomain();*/
340     const_Domain_ptr inDataDomain=inData.getDomain();
341     if (inDataDomain==functionspace.getDomain()) {
342     inDataDomain->interpolateOnDomain(tmp,inData);
343     } else {
344     inDataDomain->interpolateACross(tmp,inData);
345     }
346 jfenwick 2271 // m_data=tmp.m_data;
347     set_m_data(tmp.m_data);
348 jgs 94 }
349     }
350 gross 783 m_protected=false;
351 jgs 94 }
352    
353 jfenwick 1796 Data::Data(DataAbstract* underlyingdata)
354 jfenwick 2271 : m_shared(false), m_lazy(false)
355 jgs 94 {
356 jfenwick 2271 set_m_data(underlyingdata->getPtr());
357 jfenwick 1796 m_protected=false;
358 jgs 94 }
359    
360 jfenwick 2005 Data::Data(DataAbstract_ptr underlyingdata)
361 jfenwick 2271 : m_shared(false), m_lazy(false)
362 jfenwick 2005 {
363 jfenwick 2271 set_m_data(underlyingdata);
364 jfenwick 2005 m_protected=false;
365     }
366    
367 jfenwick 1796 Data::Data(const DataTypes::ValueType& value,
368     const DataTypes::ShapeType& shape,
369     const FunctionSpace& what,
370     bool expanded)
371 jfenwick 2271 : m_shared(false), m_lazy(false)
372 jfenwick 1796 {
373     initialise(value,shape,what,expanded);
374     m_protected=false;
375 jgs 94 }
376    
377 jfenwick 1796
378 jgs 94 Data::Data(const object& value,
379     const FunctionSpace& what,
380     bool expanded)
381 jfenwick 2271 : m_shared(false), m_lazy(false)
382 jgs 94 {
383 jfenwick 2271 WrappedArray w(value);
384     initialise(w,what,expanded);
385 gross 783 m_protected=false;
386 jgs 94 }
387    
388 matt 1319
389 jgs 94 Data::Data(const object& value,
390     const Data& other)
391 jfenwick 2271 : m_shared(false), m_lazy(false)
392 jgs 94 {
393 jfenwick 2271 WrappedArray w(value);
394 matt 1319
395 jfenwick 2458 // extract the shape of the array
396 jfenwick 2271 const DataTypes::ShapeType& tempShape=w.getShape();
397     if (w.getRank()==0) {
398 matt 1319
399    
400 jfenwick 1796 // get the space for the data vector
401     int len1 = DataTypes::noValues(tempShape);
402     DataVector temp_data(len1, 0.0, len1);
403 jfenwick 2271 temp_data.copyFromArray(w,1);
404 jfenwick 1796
405     int len = DataTypes::noValues(other.getDataPointShape());
406    
407 jfenwick 2271 DataVector temp2_data(len, temp_data[0], len);
408 jfenwick 1796 DataConstant* t=new DataConstant(other.getFunctionSpace(),other.getDataPointShape(),temp2_data);
409 jfenwick 2271 // m_data=DataAbstract_ptr(t);
410     set_m_data(DataAbstract_ptr(t));
411 jfenwick 1796
412 jgs 94 } else {
413     //
414     // Create a DataConstant with the same sample shape as other
415 jfenwick 2271 DataConstant* t=new DataConstant(w,other.getFunctionSpace());
416     // m_data=DataAbstract_ptr(t);
417     set_m_data(DataAbstract_ptr(t));
418 jgs 94 }
419 gross 783 m_protected=false;
420 jgs 94 }
421    
422 jgs 151 Data::~Data()
423     {
424 jfenwick 2271 set_m_data(DataAbstract_ptr());
425 jgs 151 }
426    
427 jfenwick 1796
428 jfenwick 2271 // only call in thread safe contexts.
429     // This method should be atomic
430     void Data::set_m_data(DataAbstract_ptr p)
431     {
432     if (m_data.get()!=0) // release old ownership
433     {
434     m_data->removeOwner(this);
435     }
436     if (p.get()!=0)
437     {
438     m_data=p;
439     m_data->addOwner(this);
440     m_shared=m_data->isShared();
441     m_lazy=m_data->isLazy();
442     }
443     }
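// Ownership sketch (illustrative; fs, a and b are hypothetical):
//
//   Data a(1.0,DataTypes::ShapeType(),fs,false);  // a owns a DataAbstract
//   Data b(a);                                    // copy ctor calls set_m_data(a.m_data),
//                                                 // registering b as a second owner
//
// m_shared and m_lazy cache the state of m_data so that queries such as isLazy() remain
// answerable while m_data is being replaced. Callers that intend to mutate call
// exclusiveWrite() first, which (by assumption here) gives this object an unshared copy.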
444 jfenwick 1796
445 jfenwick 2271 void Data::initialise(const WrappedArray& value,
446 jfenwick 1796 const FunctionSpace& what,
447     bool expanded)
448     {
449     //
450     // Construct a Data object of the appropriate type.
451     // Construct the object first as there seems to be a bug which causes
452     // undefined behaviour if an exception is thrown during construction
453     // within the shared_ptr constructor.
454     if (expanded) {
455     DataAbstract* temp=new DataExpanded(value, what);
456 jfenwick 2271 // m_data=temp->getPtr();
457     set_m_data(temp->getPtr());
458 jfenwick 1796 } else {
459     DataAbstract* temp=new DataConstant(value, what);
460 jfenwick 2271 // m_data=temp->getPtr();
461     set_m_data(temp->getPtr());
462 jfenwick 1796 }
463     }
464    
465    
466     void
467     Data::initialise(const DataTypes::ValueType& value,
468     const DataTypes::ShapeType& shape,
469     const FunctionSpace& what,
470     bool expanded)
471     {
472     //
473     // Construct a Data object of the appropriate type.
474     // Construct the object first as there seems to be a bug which causes
475     // undefined behaviour if an exception is thrown during construction
476     // within the shared_ptr constructor.
477     if (expanded) {
478     DataAbstract* temp=new DataExpanded(what, shape, value);
479 jfenwick 2271 // m_data=temp->getPtr();
480     set_m_data(temp->getPtr());
481 jfenwick 1796 } else {
482     DataAbstract* temp=new DataConstant(what, shape, value);
483 jfenwick 2271 // m_data=temp->getPtr();
484     set_m_data(temp->getPtr());
485 jfenwick 1796 }
486     }
487    
488    
489 jgs 94 escriptDataC
490     Data::getDataC()
491     {
492     escriptDataC temp;
493     temp.m_dataPtr=(void*)this;
494     return temp;
495     }
496    
497     escriptDataC
498     Data::getDataC() const
499     {
500     escriptDataC temp;
501     temp.m_dataPtr=(void*)this;
502     return temp;
503     }
504    
505 jfenwick 2271 size_t
506     Data::getSampleBufferSize() const
507     {
508     return m_data->getSampleBufferSize();
509     }
510    
511    
512 jgs 121 const boost::python::tuple
513 jgs 94 Data::getShapeTuple() const
514     {
515 jfenwick 1796 const DataTypes::ShapeType& shape=getDataPointShape();
516 jgs 94 switch(getDataPointRank()) {
517     case 0:
518     return make_tuple();
519     case 1:
520     return make_tuple(long_(shape[0]));
521     case 2:
522     return make_tuple(long_(shape[0]),long_(shape[1]));
523     case 3:
524     return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]));
525     case 4:
526     return make_tuple(long_(shape[0]),long_(shape[1]),long_(shape[2]),long_(shape[3]));
527     default:
528     throw DataException("Error - illegal Data rank.");
529     }
530     }
531 jfenwick 1799
532    
533     // The different name is needed because boost has trouble with overloaded functions.
534     // It can't work out which function is intended based solely on its name.
535     // There are ways to fix this involving creating function pointer variables for each form,
536     // but there doesn't seem to be a need, given that the methods have the same name from the Python point of view.
537 jfenwick 2105 Data
538 jfenwick 1799 Data::copySelf()
539     {
540     DataAbstract* temp=m_data->deepCopy();
541 jfenwick 2105 return Data(temp);
542 jfenwick 1799 }
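// Hypothetical binding sketch (not from this file): with distinct C++ names, the Python
// wrapper can expose both forms under the same Python name without casting overloaded
// member-function pointers, e.g. something along the lines of
//
//   class_<Data>("Data")
//       .def("copy", &Data::copySelf)   // returns a new deep copy
//       .def("copy", &Data::copy);      // copies another Data into this one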
543    
544 jgs 94 void
545     Data::copy(const Data& other)
546     {
547 jfenwick 1799 DataAbstract* temp=other.m_data->deepCopy();
548 jfenwick 1872 DataAbstract_ptr p=temp->getPtr();
549 jfenwick 2271 // m_data=p;
550     set_m_data(p);
551 jgs 94 }
552    
553 gross 1118
554 jfenwick 2005 Data
555     Data::delay()
556     {
557 jfenwick 2721 if (!isLazy())
558     {
559     DataLazy* dl=new DataLazy(m_data);
560     return Data(dl);
561     }
562     return *this;
563 jfenwick 2005 }
564    
565 jgs 94 void
566 jfenwick 2005 Data::delaySelf()
567     {
568     if (!isLazy())
569     {
570 jfenwick 2271 // m_data=(new DataLazy(m_data))->getPtr();
571     set_m_data((new DataLazy(m_data))->getPtr());
572 jfenwick 2005 }
573     }
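// Usage sketch (d and e are hypothetical Data objects; illustration only):
//
//   Data e=d.delay();   // e wraps d's data in a DataLazy node; d itself is unchanged
//   d.delaySelf();      // d now holds a DataLazy wrapper as well
//   e.resolve();        // forces evaluation back to a DataReady representation
//
// Further operations on lazy objects are themselves recorded lazily via the MAKELAZY*
// macros above, until a resolve is required.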
574    
575 jfenwick 2271
576     // For lazy data, it would seem that DataTagged will need to be treated differently since, even after setting all tags
577     // to zero, all the tags from all the DataTagged objects would be in the result.
578     // However since they all have the same value (0) whether they are there or not should not matter.
579     // So I have decided that for all types this method will create a constant 0.
580     // It can be promoted up as required.
581     // A possible efficiency concern is expanded->constant->expanded, which incurs extra memory management,
582     // but we can deal with that if it arises.
583     //
584 jfenwick 2005 void
585 gross 1118 Data::setToZero()
586     {
587 jfenwick 1803 if (isEmpty())
588 gross 1118 {
589 jfenwick 1803 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
590     }
591 jfenwick 2271 if (isLazy())
592     {
593     DataTypes::ValueType v(getNoValues(),0);
594     DataConstant* dc=new DataConstant(getFunctionSpace(),getDataPointShape(),v);
595     DataLazy* dl=new DataLazy(dc->getPtr());
596     set_m_data(dl->getPtr());
597     }
598     else
599     {
600     exclusiveWrite();
601     m_data->setToZero();
602     }
603 gross 1118 }
604    
605 jfenwick 2271
606 gross 1118 void
607 jgs 94 Data::copyWithMask(const Data& other,
608     const Data& mask)
609     {
610 jfenwick 1855 // 1. Interpolate if required so all Datas use the same FS as this
611     // 2. Tag or Expand so that all Datas are the same type
612     // 3. Iterate over the data vectors copying values where mask is >0
613 jfenwick 1803 if (other.isEmpty() || mask.isEmpty())
614     {
615     throw DataException("Error - copyWithMask not permitted using instances of DataEmpty.");
616     }
617 jfenwick 1855 Data other2(other);
618     Data mask2(mask);
619 jfenwick 2005 other2.resolve();
620     mask2.resolve();
621     this->resolve();
622 jfenwick 1855 FunctionSpace myFS=getFunctionSpace();
623     FunctionSpace oFS=other2.getFunctionSpace();
624     FunctionSpace mFS=mask2.getFunctionSpace();
625     if (oFS!=myFS)
626     {
627     if (other2.probeInterpolation(myFS))
628     {
629     other2=other2.interpolate(myFS);
630     }
631     else
632     {
633     throw DataException("Error - copyWithMask: other FunctionSpace is not compatible with this one.");
634     }
635     }
636     if (mFS!=myFS)
637     {
638     if (mask2.probeInterpolation(myFS))
639     {
640     mask2=mask2.interpolate(myFS);
641     }
642     else
643     {
644     throw DataException("Error - copyWithMask: mask FunctionSpace is not compatible with this one.");
645     }
646     }
647     // Ensure that all args have the same type
648     if (this->isExpanded() || mask2.isExpanded() || other2.isExpanded())
649     {
650     this->expand();
651     other2.expand();
652     mask2.expand();
653     }
654     else if (this->isTagged() || mask2.isTagged() || other2.isTagged())
655     {
656     this->tag();
657     other2.tag();
658     mask2.tag();
659     }
660     else if (this->isConstant() && mask2.isConstant() && other2.isConstant())
661     {
662     }
663     else
664     {
665     throw DataException("Error - Unknown DataAbstract passed to copyWithMask.");
666     }
667 jfenwick 2246 unsigned int selfrank=getDataPointRank();
668     unsigned int otherrank=other2.getDataPointRank();
669     unsigned int maskrank=mask2.getDataPointRank();
670     if ((selfrank==0) && (otherrank>0 || maskrank>0))
671     {
672     // to get here we must be copying from a large object into a scalar
673     // I am not allowing this.
674     // If you are calling copyWithMask then you are considering keeping some existing values
675     // and so I'm going to assume that you don't want your data objects getting a new shape.
676     throw DataException("Attempt to copyWithMask into a scalar from an object or mask with rank>0.");
677     }
678 jfenwick 2271 exclusiveWrite();
679 jfenwick 1855 // Now we iterate over the elements
680 jfenwick 2271 DataVector& self=getReady()->getVectorRW();
681     const DataVector& ovec=other2.getReadyPtr()->getVectorRO();
682     const DataVector& mvec=mask2.getReadyPtr()->getVectorRO();
683 jfenwick 2246
684     if ((selfrank>0) && (otherrank==0) &&(maskrank==0))
685 jfenwick 1855 {
686 jfenwick 2246 // Not allowing this combination:
687     // it is not clear what the rank of the target should be.
688     // Should it be filled with the scalar (rank stays the same),
689     // or should the target object be reshaped to be a scalar as well?
690     throw DataException("Attempt to copyWithMask from scalar mask and data into non-scalar target.");
691 jfenwick 1855 }
692 jfenwick 2246 if ((selfrank>0) && (otherrank>0) &&(maskrank==0))
693     {
694     if (mvec[0]>0) // copy whole object if scalar is >0
695     {
696     copy(other);
697     }
698     return;
699     }
700     if (isTagged()) // so all objects involved will also be tagged
701     {
702     // note the !
703     if (!((getDataPointShape()==mask2.getDataPointShape()) &&
704     ((other2.getDataPointShape()==mask2.getDataPointShape()) || (otherrank==0))))
705     {
706     throw DataException("copyWithMask, shape mismatch.");
707     }
708    
709     // We need to consider the possibility that tags are missing or in the wrong order
710     // My guiding assumption here is: All tagged Datas are assumed to have the default value for
711     // all tags which are not explicitly defined
712    
713     const DataTagged* mptr=dynamic_cast<const DataTagged*>(mask2.m_data.get());
714     const DataTagged* optr=dynamic_cast<const DataTagged*>(other2.m_data.get());
715     DataTagged* tptr=dynamic_cast<DataTagged*>(m_data.get());
716    
717     // first, add any tags missing from other or mask
718     const DataTagged::DataMapType& olookup=optr->getTagLookup();
719     const DataTagged::DataMapType& mlookup=mptr->getTagLookup();
720 jfenwick 2260 const DataTagged::DataMapType& tlookup=tptr->getTagLookup();
721 jfenwick 2246 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
722     for (i=olookup.begin();i!=olookup.end();i++)
723     {
724     tptr->addTag(i->first);
725     }
726     for (i=mlookup.begin();i!=mlookup.end();i++) {
727     tptr->addTag(i->first);
728     }
729     // now we know that *this has all the required tags but they aren't guaranteed to be in
730     // the same order
731    
732     // There are two possibilities: 1. all objects have the same rank. 2. other is a scalar
733     if ((selfrank==otherrank) && (otherrank==maskrank))
734     {
735 jfenwick 2260 for (i=tlookup.begin();i!=tlookup.end();i++)
736 jfenwick 2246 {
737     // get the target offset
738     DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
739     DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
740     DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
741 jfenwick 2260 for (int j=0;j<getDataPointSize();++j)
742 jfenwick 2246 {
743 jfenwick 2260 if (mvec[j+moff]>0)
744 jfenwick 2246 {
745 jfenwick 2260 self[j+toff]=ovec[j+ooff];
746 jfenwick 2246 }
747     }
748     }
749 jfenwick 2260 // now for the default value
750     for (int j=0;j<getDataPointSize();++j)
751     {
752     if (mvec[j+mptr->getDefaultOffset()]>0)
753     {
754     self[j+tptr->getDefaultOffset()]=ovec[j+optr->getDefaultOffset()];
755     }
756     }
757 jfenwick 2246 }
758     else // other is a scalar
759     {
760 jfenwick 2260 for (i=tlookup.begin();i!=tlookup.end();i++)
761 jfenwick 2246 {
762     // get the target offset
763     DataTypes::ValueType::size_type toff=tptr->getOffsetForTag(i->first);
764     DataTypes::ValueType::size_type moff=mptr->getOffsetForTag(i->first);
765     DataTypes::ValueType::size_type ooff=optr->getOffsetForTag(i->first);
766 jfenwick 2260 for (int j=0;j<getDataPointSize();++j)
767 jfenwick 2246 {
768 jfenwick 2260 if (mvec[j+moff]>0)
769 jfenwick 2246 {
770 jfenwick 2260 self[j+toff]=ovec[ooff];
771 jfenwick 2246 }
772     }
773     }
774 jfenwick 2260 // now for the default value
775     for (int j=0;j<getDataPointSize();++j)
776     {
777     if (mvec[j+mptr->getDefaultOffset()]>0)
778     {
779     self[j+tptr->getDefaultOffset()]=ovec[0];
780     }
781     }
782 jfenwick 2246 }
783    
784     return; // ugly
785     }
786     // mixed scalar and non-scalar operation
787     if ((selfrank>0) && (otherrank==0) && (mask2.getDataPointShape()==getDataPointShape()))
788     {
789     size_t num_points=self.size();
790     // OPENMP 3.0 allows unsigned loop vars.
791     #if defined(_OPENMP) && (_OPENMP < 200805)
792     long i;
793     #else
794     size_t i;
795 jfenwick 2260 #endif
796     size_t psize=getDataPointSize();
797 jfenwick 2246 #pragma omp parallel for private(i) schedule(static)
798     for (i=0;i<num_points;++i)
799     {
800     if (mvec[i]>0)
801     {
802 jfenwick 2260 self[i]=ovec[i/psize]; // since this is expanded there is one scalar
803     } // dest point
804 jfenwick 2246 }
805     return; // ugly!
806     }
807     // tagged data is already taken care of so we only need to worry about shapes
808     // special cases with scalars are already dealt with so all we need to worry about is shape
809     if ((getDataPointShape()!=other2.getDataPointShape()) || getDataPointShape()!=mask2.getDataPointShape())
810     {
811     ostringstream oss;
812     oss <<"Error - size mismatch in arguments to copyWithMask.";
813     oss << "\nself_shape=" << DataTypes::shapeToString(getDataPointShape());
814     oss << " other2_shape=" << DataTypes::shapeToString(other2.getDataPointShape());
815     oss << " mask2_shape=" << DataTypes::shapeToString(mask2.getDataPointShape());
816     throw DataException(oss.str());
817     }
818 jfenwick 1855 size_t num_points=self.size();
819 phornby 1921
820     // OPENMP 3.0 allows unsigned loop vars.
821     #if defined(_OPENMP) && (_OPENMP < 200805)
822     long i;
823     #else
824 phornby 1918 size_t i;
825 phornby 1921 #endif
826 jfenwick 1855 #pragma omp parallel for private(i) schedule(static)
827     for (i=0;i<num_points;++i)
828     {
829     if (mvec[i]>0)
830     {
831     self[i]=ovec[i];
832     }
833     }
834     }
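// Usage sketch (target, source and mask are hypothetical Data objects):
//
//   target.copyWithMask(source, mask.wherePositive());
//
// copies values from source into target wherever the mask is >0, after the interpolation
// and type-promotion steps described at the top of the method.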
835 jgs 94
836     bool
837     Data::isExpanded() const
838     {
839     DataExpanded* temp=dynamic_cast<DataExpanded*>(m_data.get());
840     return (temp!=0);
841     }
842    
843     bool
844 jfenwick 2271 Data::actsExpanded() const
845     {
846     return m_data->actsExpanded();
847    
848     }
849    
850    
851     bool
852 jgs 94 Data::isTagged() const
853     {
854     DataTagged* temp=dynamic_cast<DataTagged*>(m_data.get());
855     return (temp!=0);
856     }
857    
858     bool
859     Data::isEmpty() const
860     {
861     DataEmpty* temp=dynamic_cast<DataEmpty*>(m_data.get());
862     return (temp!=0);
863     }
864    
865     bool
866     Data::isConstant() const
867     {
868     DataConstant* temp=dynamic_cast<DataConstant*>(m_data.get());
869     return (temp!=0);
870     }
871    
872 jfenwick 2005 bool
873     Data::isLazy() const
874     {
875 jfenwick 2271 return m_lazy; // not asking m_data because we need to be able to ask this while m_data is changing
876 jfenwick 2005 }
877    
878     // at the moment this is synonymous with !isLazy() but that could change
879     bool
880     Data::isReady() const
881     {
882     return (dynamic_cast<DataReady*>(m_data.get())!=0);
883     }
884    
885    
886 jgs 94 void
887 ksteube 1312 Data::setProtection()
888     {
889 gross 783 m_protected=true;
890     }
891    
892     bool
893 ksteube 1312 Data::isProtected() const
894     {
895 gross 783 return m_protected;
896     }
897    
898    
899    
900     void
901 jgs 94 Data::expand()
902     {
903     if (isConstant()) {
904     DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
905     DataAbstract* temp=new DataExpanded(*tempDataConst);
906 jfenwick 2271 // m_data=temp->getPtr();
907     set_m_data(temp->getPtr());
908 jgs 94 } else if (isTagged()) {
909     DataTagged* tempDataTag=dynamic_cast<DataTagged*>(m_data.get());
910     DataAbstract* temp=new DataExpanded(*tempDataTag);
911 jfenwick 2271 // m_data=temp->getPtr();
912     set_m_data(temp->getPtr());
913 jgs 94 } else if (isExpanded()) {
914     //
915     // do nothing
916     } else if (isEmpty()) {
917     throw DataException("Error - Expansion of DataEmpty not possible.");
918 jfenwick 2005 } else if (isLazy()) {
919     resolve();
920     expand(); // resolve might not give us expanded data
921 jgs 94 } else {
922     throw DataException("Error - Expansion not implemented for this Data type.");
923     }
924     }
925    
926     void
927     Data::tag()
928     {
929     if (isConstant()) {
930     DataConstant* tempDataConst=dynamic_cast<DataConstant*>(m_data.get());
931     DataAbstract* temp=new DataTagged(*tempDataConst);
932 jfenwick 2271 // m_data=temp->getPtr();
933     set_m_data(temp->getPtr());
934 jgs 94 } else if (isTagged()) {
935     // do nothing
936     } else if (isExpanded()) {
937     throw DataException("Error - Creating tag data from DataExpanded not possible.");
938     } else if (isEmpty()) {
939     throw DataException("Error - Creating tag data from DataEmpty not possible.");
940 jfenwick 2005 } else if (isLazy()) {
941     DataAbstract_ptr res=m_data->resolve();
942     if (m_data->isExpanded())
943     {
944     throw DataException("Error - data would resolve to DataExpanded, tagging is not possible.");
945     }
946 jfenwick 2271 // m_data=res;
947     set_m_data(res);
948 jfenwick 2005 tag();
949 jgs 94 } else {
950     throw DataException("Error - Tagging not implemented for this Data type.");
951     }
952     }
953    
954 jfenwick 2005 void
955     Data::resolve()
956     {
957     if (isLazy())
958     {
959 jfenwick 2271 // m_data=m_data->resolve();
960     set_m_data(m_data->resolve());
961 jfenwick 2005 }
962     }
963    
964 jfenwick 2271 void
965     Data::requireWrite()
966     {
967     resolve();
968     exclusiveWrite();
969     }
970 jfenwick 2005
971 gross 854 Data
972     Data::oneOver() const
973 jgs 102 {
974 jfenwick 2146 MAKELAZYOP(RECIP)
975 matt 1334 return C_TensorUnaryOperation(*this, bind1st(divides<double>(),1.));
976 jgs 102 }
977    
978 jgs 94 Data
979 gross 698 Data::wherePositive() const
980 jgs 94 {
981 jfenwick 2146 MAKELAZYOP(GZ)
982 matt 1334 return C_TensorUnaryOperation(*this, bind2nd(greater<double>(),0.0));
983 jgs 94 }
984    
985     Data
986 gross 698 Data::whereNegative() const
987 jgs 102 {
988 jfenwick 2146 MAKELAZYOP(LZ)
989 matt 1334 return C_TensorUnaryOperation(*this, bind2nd(less<double>(),0.0));
990 jgs 102 }
991    
992     Data
993 gross 698 Data::whereNonNegative() const
994 jgs 94 {
995 jfenwick 2146 MAKELAZYOP(GEZ)
996 matt 1334 return C_TensorUnaryOperation(*this, bind2nd(greater_equal<double>(),0.0));
997 jgs 94 }
998    
999     Data
1000 gross 698 Data::whereNonPositive() const
1001 jgs 94 {
1002 jfenwick 2146 MAKELAZYOP(LEZ)
1003 matt 1334 return C_TensorUnaryOperation(*this, bind2nd(less_equal<double>(),0.0));
1004 jgs 94 }
1005    
1006     Data
1007 jgs 571 Data::whereZero(double tol) const
1008 jgs 94 {
1009 jfenwick 2147 // Data dataAbs=abs();
1010     // return C_TensorUnaryOperation(dataAbs, bind2nd(less_equal<double>(),tol));
1011     MAKELAZYOPOFF(EZ,tol)
1012     return C_TensorUnaryOperation(*this, bind2nd(AbsLTE(),tol));
1013    
1014 jgs 94 }
1015    
1016     Data
1017 jgs 571 Data::whereNonZero(double tol) const
1018 jgs 102 {
1019 jfenwick 2147 // Data dataAbs=abs();
1020     // return C_TensorUnaryOperation(dataAbs, bind2nd(greater<double>(),tol));
1021     MAKELAZYOPOFF(NEZ,tol)
1022     return C_TensorUnaryOperation(*this, bind2nd(AbsGT(),tol));
1023    
1024 jgs 102 }
1025    
1026     Data
1027 jgs 94 Data::interpolate(const FunctionSpace& functionspace) const
1028     {
1029     return Data(*this,functionspace);
1030     }
1031    
1032     bool
1033     Data::probeInterpolation(const FunctionSpace& functionspace) const
1034     {
1035 jfenwick 2005 return getFunctionSpace().probeInterpolation(functionspace);
1036 jgs 94 }
1037    
1038     Data
1039     Data::gradOn(const FunctionSpace& functionspace) const
1040     {
1041 jfenwick 1803 if (isEmpty())
1042     {
1043     throw DataException("Error - operation not permitted on instances of DataEmpty.");
1044     }
1045 matt 1353 double blocktimer_start = blocktimer_time();
1046 jgs 94 if (functionspace.getDomain()!=getDomain())
1047     throw DataException("Error - gradient cannot be calculated on different domains.");
1048 jfenwick 1796 DataTypes::ShapeType grad_shape=getDataPointShape();
1049 jgs 94 grad_shape.push_back(functionspace.getDim());
1050     Data out(0.0,grad_shape,functionspace,true);
1051 jfenwick 1872 getDomain()->setToGradient(out,*this);
1052 matt 1353 blocktimer_increment("grad()", blocktimer_start);
1053 jgs 94 return out;
1054     }
1055    
1056     Data
1057     Data::grad() const
1058     {
1059 jfenwick 1803 if (isEmpty())
1060     {
1061     throw DataException("Error - operation not permitted on instances of DataEmpty.");
1062     }
1063 jfenwick 1872 return gradOn(escript::function(*getDomain()));
1064 jgs 94 }
1065    
1066     int
1067     Data::getDataPointSize() const
1068     {
1069 jfenwick 1796 return m_data->getNoValues();
1070 jgs 94 }
1071    
1072 jfenwick 2459
1073 jfenwick 1796 DataTypes::ValueType::size_type
1074 jgs 94 Data::getLength() const
1075     {
1076     return m_data->getLength();
1077     }
1078    
1079 jfenwick 2481
1080 jfenwick 2459 // There is no parallelism here ... elements need to be added in the correct order.
1081     // If we could presize the list and then fill in the elements it might work.
1082     // This would need setting elements to be threadsafe.
1083     // Having multiple C threads calling into one interpreter is apparently a no-no.
1084 jfenwick 2271 const boost::python::object
1085 jfenwick 2459 Data::toListOfTuples(bool scalarastuple)
1086     {
1087     using namespace boost::python;
1088 jfenwick 2461 using boost::python::list;
1089 jfenwick 2459 if (get_MPISize()>1)
1090     {
1091     throw DataException("::toListOfTuples is not available for MPI with more than one process.");
1092     }
1093     unsigned int rank=getDataPointRank();
1094     unsigned int size=getDataPointSize();
1095 jfenwick 2461
1096 jfenwick 2459 int npoints=getNumDataPoints();
1097     expand(); // This will also resolve if required
1098     const DataTypes::ValueType& vec=getReady()->getVectorRO();
1099 jfenwick 2461 boost::python::list temp;
1100     temp.append(object());
1101     boost::python::list res(temp*npoints);// presize the list by the "[None] * npoints" trick
1102 jfenwick 2459 if (rank==0)
1103     {
1104     long count;
1105     if (scalarastuple)
1106     {
1107     for (count=0;count<npoints;++count)
1108     {
1109 jfenwick 2461 res[count]=make_tuple(vec[count]);
1110 jfenwick 2459 }
1111     }
1112     else
1113     {
1114     for (count=0;count<npoints;++count)
1115     {
1116 jfenwick 2461 res[count]=vec[count];
1117 jfenwick 2459 }
1118     }
1119     }
1120     else if (rank==1)
1121     {
1122     size_t count;
1123     size_t offset=0;
1124     for (count=0;count<npoints;++count,offset+=size)
1125     {
1126 jfenwick 2461 res[count]=pointToTuple1(getDataPointShape(), vec, offset);
1127 jfenwick 2459 }
1128     }
1129     else if (rank==2)
1130     {
1131     size_t count;
1132     size_t offset=0;
1133     for (count=0;count<npoints;++count,offset+=size)
1134     {
1135 jfenwick 2461 res[count]=pointToTuple2(getDataPointShape(), vec, offset);
1136 jfenwick 2459 }
1137     }
1138     else if (rank==3)
1139     {
1140     size_t count;
1141     size_t offset=0;
1142     for (count=0;count<npoints;++count,offset+=size)
1143     {
1144 jfenwick 2461 res[count]=pointToTuple3(getDataPointShape(), vec, offset);
1145 jfenwick 2459 }
1146     }
1147     else if (rank==4)
1148     {
1149     size_t count;
1150     size_t offset=0;
1151     for (count=0;count<npoints;++count,offset+=size)
1152     {
1153 jfenwick 2461 res[count]=pointToTuple4(getDataPointShape(), vec, offset);
1154 jfenwick 2459 }
1155     }
1156     else
1157     {
1158     throw DataException("Unknown rank in ::toListOfTuples()");
1159     }
1160     return res;
1161     }
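// Result sketch (d is a hypothetical Data object; illustration only):
//
//   boost::python::object pts=d.toListOfTuples(false);
//
// yields a Python list with one entry per data point: a plain float for rank-0 data
// (or a 1-tuple when scalarastuple is true), and a nested tuple built by the
// pointToTuple* helpers for higher ranks.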
1162    
1163     const boost::python::object
1164 jfenwick 2271 Data::getValueOfDataPointAsTuple(int dataPointNo)
1165     {
1166     forceResolve();
1167     if (getNumDataPointsPerSample()>0) {
1168 gross 921 int sampleNo = dataPointNo/getNumDataPointsPerSample();
1169     int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1170     //
1171     // Check a valid sample number has been supplied
1172 trankine 924 if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1173 jfenwick 2271 throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid sampleNo.");
1174 gross 921 }
1175 ksteube 1312
1176 gross 921 //
1177     // Check a valid data point number has been supplied
1178 trankine 924 if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1179 jfenwick 2271 throw DataException("Error - Data::getValueOfDataPointAsTuple: invalid dataPointNoInSample.");
1180 gross 921 }
1181     // TODO: global error handling
1182 jfenwick 1796 DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1183 jfenwick 2271 return pointToTuple(getDataPointShape(),&(getDataAtOffsetRO(offset)));
1184     }
1185     else
1186     {
1187 jfenwick 2471 // The pre-numpy method would return an empty array of the given shape
1188 jfenwick 2271 // I'm going to throw an exception because if we have zero points per sample we have problems
1189     throw DataException("Error - need at least 1 datapoint per sample.");
1190     }
1191 ksteube 1312
1192 jgs 117 }
1193 jfenwick 1796
1194 jfenwick 2271
1195 gross 921 void
1196 ksteube 1312 Data::setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object)
1197 jgs 121 {
1198 gross 1034 // this will throw if the value cannot be represented
1199 jfenwick 2271 setValueOfDataPointToArray(dataPointNo,py_object);
1200 gross 1034 }
1201    
1202     void
1203 jfenwick 2271 Data::setValueOfDataPointToArray(int dataPointNo, const boost::python::object& obj)
1204 gross 1034 {
1205 gross 921 if (isProtected()) {
1206     throw DataException("Error - attempt to update protected Data object.");
1207     }
1208 jfenwick 2271
1209     WrappedArray w(obj);
1210 gross 921 //
1211     // check rank
1212 jfenwick 2271 if (static_cast<unsigned int>(w.getRank())<getDataPointRank())
1213 jfenwick 2458 throw DataException("Rank of array does not match Data object rank");
1214 bcumming 790
1215 jgs 121 //
1216 jfenwick 2458 // check shape of array
1217 phornby 1953 for (unsigned int i=0; i<getDataPointRank(); i++) {
1218 jfenwick 2271 if (w.getShape()[i]!=getDataPointShape()[i])
1219 jfenwick 2458 throw DataException("Shape of array does not match Data object shape");
1220 jgs 121 }
1221 jfenwick 2721
1222     exclusiveWrite();
1223    
1224 jgs 121 //
1225 gross 921 // make sure data is expanded:
1226 gross 1859 //
1227 gross 921 if (!isExpanded()) {
1228     expand();
1229 jgs 121 }
1230 gross 921 if (getNumDataPointsPerSample()>0) {
1231     int sampleNo = dataPointNo/getNumDataPointsPerSample();
1232     int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1233 jfenwick 2271 m_data->copyToDataPoint(sampleNo, dataPointNoInSample,w);
1234 gross 921 } else {
1235 jfenwick 2271 m_data->copyToDataPoint(-1, 0,w);
1236 gross 921 }
1237     }
1238 jgs 121
1239 gross 922 void
1240     Data::setValueOfDataPoint(int dataPointNo, const double value)
1241     {
1242     if (isProtected()) {
1243     throw DataException("Error - attempt to update protected Data object.");
1244     }
1245     //
1246     // make sure data is expanded:
1247 jfenwick 2721 exclusiveWrite();
1248 gross 922 if (!isExpanded()) {
1249     expand();
1250     }
1251     if (getNumDataPointsPerSample()>0) {
1252     int sampleNo = dataPointNo/getNumDataPointsPerSample();
1253     int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1254     m_data->copyToDataPoint(sampleNo, dataPointNoInSample,value);
1255     } else {
1256     m_data->copyToDataPoint(-1, 0,value);
1257     }
1258     }
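// Usage sketch (fs is a hypothetical FunctionSpace; illustration only):
//
//   Data d(0.0,DataTypes::ShapeType(),fs,false);   // scalar, stored as DataConstant
//   d.setValueOfDataPoint(0,3.5);                  // expands d if needed, then writes point 0
//
// The write is refused with a DataException if the object has been protected via
// setProtection().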
1259    
1260 ksteube 1312 const
1261 jfenwick 2271 boost::python::object
1262     Data::getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo)
1263     {
1264     // This could be lazier than it is now
1265     forceResolve();
1266 jgs 121
1267 jfenwick 2271 // copy datapoint into a buffer
1268     // broadcast buffer to all nodes
1269     // convert buffer to tuple
1270     // return tuple
1271 jgs 121
1272 jfenwick 2271 const DataTypes::ShapeType& dataPointShape = getDataPointShape();
1273     size_t length=DataTypes::noValues(dataPointShape);
1274 jgs 121
1275 gross 921 // added for the MPI communication
1276     double *tmpData = new double[length];
1277 bcumming 790
1278 jfenwick 2271 // updated for the MPI case
1279     if( get_MPIRank()==procNo ){
1280     if (getNumDataPointsPerSample()>0) {
1281     int sampleNo = dataPointNo/getNumDataPointsPerSample();
1282     int dataPointNoInSample = dataPointNo - sampleNo * getNumDataPointsPerSample();
1283     //
1284     // Check a valid sample number has been supplied
1285     if ((sampleNo >= getNumSamples()) || (sampleNo < 0 )) {
1286 jfenwick 2471 throw DataException("Error - Data::getValueOfGlobalDataPointAsTuple: invalid sampleNo.");
1287 bcumming 790 }
1288    
1289 jfenwick 2271 //
1290     // Check a valid data point number has been supplied
1291     if ((dataPointNoInSample >= getNumDataPointsPerSample()) || (dataPointNoInSample < 0)) {
1292 jfenwick 2471 throw DataException("Error - Data::getValueOfGlobalDataPointAsTuple: invalid dataPointNoInSample.");
1293 bcumming 790 }
1294 jfenwick 2271 // TODO: global error handling
1295     DataTypes::ValueType::size_type offset=getDataOffset(sampleNo, dataPointNoInSample);
1296 bcumming 790
1297 jfenwick 2271 memcpy(tmpData,&(getDataAtOffsetRO(offset)),length*sizeof(double));
1298     }
1299     }
1300     #ifdef PASO_MPI
1301     // broadcast the data to all other processes
1302     MPI_Bcast( tmpData, length, MPI_DOUBLE, procNo, get_MPIComm() );
1303     #endif
1304    
1305     boost::python::tuple t=pointToTuple(dataPointShape,tmpData);
1306     delete [] tmpData;
1307 jgs 121 //
1308     // return the loaded array
1309 jfenwick 2271 return t;
1310    
1311 jgs 121 }
1312    
1313 gross 921
1314 jfenwick 2271 boost::python::object
1315     Data::integrateToTuple_const() const
1316 jfenwick 2005 {
1317     if (isLazy())
1318     {
1319     throw DataException("Error - cannot integrate for constant lazy data.");
1320     }
1321     return integrateWorker();
1322     }
1323 gross 921
1324 jfenwick 2271 boost::python::object
1325     Data::integrateToTuple()
1326 jgs 94 {
1327 jfenwick 2005 if (isLazy())
1328     {
1329 jfenwick 2721 expand(); // Can't do a non-resolving version of this without changing the domain object
1330     } // see the dom->setToIntegrals call. Not saying it can't be done, just not doing it yet.
1331 jfenwick 2005 return integrateWorker();
1332 jfenwick 2271
1333 jfenwick 2005 }
1334    
1335 jfenwick 2271 boost::python::object
1336 jfenwick 2005 Data::integrateWorker() const
1337     {
1338 jfenwick 1796 DataTypes::ShapeType shape = getDataPointShape();
1339 ksteube 1312 int dataPointSize = getDataPointSize();
1340 jgs 94
1341     //
1342     // calculate the integral values
1343 ksteube 1312 vector<double> integrals(dataPointSize);
1344     vector<double> integrals_local(dataPointSize);
1345 jfenwick 2089 const AbstractContinuousDomain* dom=dynamic_cast<const AbstractContinuousDomain*>(getDomain().get());
1346     if (dom==0)
1347     {
1348     throw DataException("Can not integrate over non-continuous domains.");
1349     }
1350 ksteube 1312 #ifdef PASO_MPI
1351 jfenwick 2089 dom->setToIntegrals(integrals_local,*this);
1352 ksteube 1312 // Global sum: use an array instead of a vector because elements of array are guaranteed to be contiguous in memory
1353     double *tmp = new double[dataPointSize];
1354     double *tmp_local = new double[dataPointSize];
1355     for (int i=0; i<dataPointSize; i++) { tmp_local[i] = integrals_local[i]; }
1356     MPI_Allreduce( &tmp_local[0], &tmp[0], dataPointSize, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
1357     for (int i=0; i<dataPointSize; i++) { integrals[i] = tmp[i]; }
1358 jfenwick 2271 tuple result=pointToTuple(shape,tmp);
1359 ksteube 1312 delete[] tmp;
1360     delete[] tmp_local;
1361     #else
1362 jfenwick 2089 dom->setToIntegrals(integrals,*this);
1363 jfenwick 2271 /* double *tmp = new double[dataPointSize];
1364     for (int i=0; i<dataPointSize; i++) { tmp[i]=integrals[i]; }*/
1365     tuple result=pointToTuple(shape,integrals);
1366     // delete tmp;
1367 ksteube 1312 #endif
1368 jgs 94
1369    
1370 jfenwick 2271 return result;
1371 jgs 94 }
1372    
1373     Data
1374     Data::sin() const
1375     {
1376 jfenwick 2146 MAKELAZYOP(SIN)
1377 matt 1349 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sin);
1378 jgs 94 }
1379    
1380     Data
1381     Data::cos() const
1382     {
1383 jfenwick 2146 MAKELAZYOP(COS)
1384 matt 1349 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cos);
1385 jgs 94 }
1386    
1387     Data
1388     Data::tan() const
1389     {
1390 jfenwick 2146 MAKELAZYOP(TAN)
1391 matt 1349 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tan);
1392 jgs 94 }
1393    
1394     Data
1395 jgs 150 Data::asin() const
1396     {
1397 jfenwick 2146 MAKELAZYOP(ASIN)
1398 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::asin);
1399 jgs 150 }
1400    
1401     Data
1402     Data::acos() const
1403     {
1404 jfenwick 2146 MAKELAZYOP(ACOS)
1405 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::acos);
1406 jgs 150 }
1407    
1408 phornby 1026
1409 jgs 150 Data
1410     Data::atan() const
1411     {
1412 jfenwick 2146 MAKELAZYOP(ATAN)
1413 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::atan);
1414 jgs 150 }
1415    
1416     Data
1417     Data::sinh() const
1418     {
1419 jfenwick 2146 MAKELAZYOP(SINH)
1420 jfenwick 2005 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sinh);
1421 jgs 150 }
1422    
1423     Data
1424     Data::cosh() const
1425     {
1426 jfenwick 2146 MAKELAZYOP(COSH)
1427 jfenwick 2005 return C_TensorUnaryOperation<double (*)(double)>(*this, ::cosh);
1428 jgs 150 }
1429    
1430     Data
1431     Data::tanh() const
1432     {
1433 jfenwick 2146 MAKELAZYOP(TANH)
1434 jfenwick 2005 return C_TensorUnaryOperation<double (*)(double)>(*this, ::tanh);
1435 jgs 150 }
1436    
1437 phornby 1026
1438 jgs 150 Data
1439 phornby 1026 Data::erf() const
1440     {
1441 phornby 2048 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1442 gross 1028 throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
1443     #else
1444 jfenwick 2146 MAKELAZYOP(ERF)
1445 matt 1334 return C_TensorUnaryOperation(*this, ::erf);
1446 phornby 1026 #endif
1447     }
1448    
1449     Data
1450 jgs 150 Data::asinh() const
1451     {
1452 jfenwick 2146 MAKELAZYOP(ASINH)
1453 phornby 2048 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1454 matt 1334 return C_TensorUnaryOperation(*this, escript::asinh_substitute);
1455 phornby 1032 #else
1456 matt 1334 return C_TensorUnaryOperation(*this, ::asinh);
1457 phornby 1032 #endif
1458 jgs 150 }
1459    
1460     Data
1461     Data::acosh() const
1462     {
1463 jfenwick 2146 MAKELAZYOP(ACOSH)
1464 phornby 2048 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1465 matt 1334 return C_TensorUnaryOperation(*this, escript::acosh_substitute);
1466 phornby 1032 #else
1467 matt 1334 return C_TensorUnaryOperation(*this, ::acosh);
1468 phornby 1032 #endif
1469 jgs 150 }
1470    
1471     Data
1472     Data::atanh() const
1473     {
1474 jfenwick 2146 MAKELAZYOP(ATANH)
1475 phornby 2048 #if defined (_WIN32) && !defined(__INTEL_COMPILER)
1476 matt 1334 return C_TensorUnaryOperation(*this, escript::atanh_substitute);
1477 phornby 1032 #else
1478 matt 1334 return C_TensorUnaryOperation(*this, ::atanh);
1479 phornby 1032 #endif
1480 jgs 150 }
1481    
1482     Data
1483 gross 286 Data::log10() const
1484 jfenwick 2146 {
1485     MAKELAZYOP(LOG10)
1486 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log10);
1487 jgs 94 }
1488    
1489     Data
1490 gross 286 Data::log() const
1491 jgs 94 {
1492 jfenwick 2146 MAKELAZYOP(LOG)
1493 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::log);
1494 jgs 94 }
1495    
1496 jgs 106 Data
1497     Data::sign() const
1498 jgs 94 {
1499 jfenwick 2146 MAKELAZYOP(SIGN)
1500 matt 1334 return C_TensorUnaryOperation(*this, escript::fsign);
1501 jgs 94 }
1502    
1503 jgs 106 Data
1504     Data::abs() const
1505 jgs 94 {
1506 jfenwick 2146 MAKELAZYOP(ABS)
1507 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::fabs);
1508 jgs 94 }
1509    
1510 jgs 106 Data
1511     Data::neg() const
1512 jgs 94 {
1513 jfenwick 2146 MAKELAZYOP(NEG)
1514 matt 1334 return C_TensorUnaryOperation(*this, negate<double>());
1515 jgs 94 }
1516    
1517 jgs 102 Data
1518 jgs 106 Data::pos() const
1519 jgs 94 {
1520 jfenwick 2005 // Not doing a lazy check here is deliberate:
1521     // since a deep copy of lazy data should be cheap, I'll just let it happen now.
1522 jgs 148 Data result;
1523     // perform a deep copy
1524     result.copy(*this);
1525     return result;
1526 jgs 102 }
1527    
1528     Data
1529 jgs 106 Data::exp() const
1530 jfenwick 2146 {
1531     MAKELAZYOP(EXP)
1532 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::exp);
1533 jgs 102 }
1534    
1535     Data
1536 jgs 106 Data::sqrt() const
1537 jgs 102 {
1538 jfenwick 2146 MAKELAZYOP(SQRT)
1539 matt 1350 return C_TensorUnaryOperation<double (*)(double)>(*this, ::sqrt);
1540 jgs 102 }
1541    
1542 jgs 106 double
1543 jfenwick 2005 Data::Lsup_const() const
1544 jgs 102 {
1545 jfenwick 2005 if (isLazy())
1546     {
1547     throw DataException("Error - cannot compute Lsup for constant lazy data.");
1548     }
1549     return LsupWorker();
1550     }
1551    
1552     double
1553     Data::Lsup()
1554     {
1555     if (isLazy())
1556     {
1557 jfenwick 2721 if (!actsExpanded() || CHECK_DO_CRES)
1558     {
1559     resolve();
1560     }
1561     else
1562     {
1563     #ifdef PASO_MPI
1564     return lazyAlgWorker<AbsMax>(0,MPI_MAX);
1565     #else
1566 jfenwick 2724 return lazyAlgWorker<AbsMax>(0);
1567 jfenwick 2721 #endif
1568     }
1569 jfenwick 2005 }
1570     return LsupWorker();
1571     }
1572    
1573     double
1574     Data::sup_const() const
1575     {
1576     if (isLazy())
1577     {
1578     throw DataException("Error - cannot compute sup for constant lazy data.");
1579     }
1580     return supWorker();
1581     }
1582    
1583     double
1584     Data::sup()
1585     {
1586     if (isLazy())
1587     {
1588 jfenwick 2721 if (!actsExpanded() || CHECK_DO_CRES)
1589     {
1590     resolve();
1591     }
1592     else
1593     {
1594     #ifdef PASO_MPI
1595     return lazyAlgWorker<FMax>(numeric_limits<double>::max()*-1, MPI_MAX);
1596     #else
1597 jfenwick 2724 return lazyAlgWorker<FMax>(numeric_limits<double>::max()*-1);
1598 jfenwick 2721 #endif
1599     }
1600 jfenwick 2005 }
1601     return supWorker();
1602     }
1603    
1604     double
1605     Data::inf_const() const
1606     {
1607     if (isLazy())
1608     {
1609     throw DataException("Error - cannot compute inf for constant lazy data.");
1610     }
1611     return infWorker();
1612     }
1613    
1614     double
1615     Data::inf()
1616     {
1617     if (isLazy())
1618     {
1619 jfenwick 2721 if (!actsExpanded() || CHECK_DO_CRES)
1620     {
1621     resolve();
1622     }
1623     else
1624     {
1625     #ifdef PASO_MPI
1626     return lazyAlgWorker<FMin>(numeric_limits<double>::max(), MPI_MIN);
1627     #else
1628 jfenwick 2724 return lazyAlgWorker<FMin>(numeric_limits<double>::max());
1629 jfenwick 2721 #endif
1630     }
1631 jfenwick 2005 }
1632     return infWorker();
1633     }
1634    
1635 jfenwick 2721 template <class BinaryOp>
1636 jfenwick 2005 double
1637 jfenwick 2723 #ifdef PASO_MPI
1638     Data::lazyAlgWorker(double init, MPI_Op mpiop_type)
1639     #else
1640     Data::lazyAlgWorker(double init)
1641     #endif
1642 jfenwick 2721 {
1643     if (!isLazy() || !m_data->actsExpanded())
1644     {
1645     throw DataException("Error - lazyAlgWorker can only be called on lazy(expanded) data.");
1646     }
1647     DataLazy* dl=dynamic_cast<DataLazy*>(m_data.get());
1648     EsysAssert((dl!=0), "Programming error - casting to DataLazy.");
1649     BufferGroup* bg=allocSampleBuffer();
1650     double val=init;
1651     int i=0;
1652     const size_t numsamples=getNumSamples();
1653     const size_t samplesize=getNoValues()*getNumDataPointsPerSample();
1654     BinaryOp operation;
1655     #pragma omp parallel private(i)
1656     {
1657     double localtot=init;
1658     #pragma omp for schedule(static) private(i)
1659     for (i=0;i<numsamples;++i)
1660     {
1661     size_t roffset=0;
1662     const DataTypes::ValueType* v=dl->resolveSample(*bg, i, roffset);
1663     // Now we have the sample, run operation on all points
1664     for (size_t j=0;j<samplesize;++j)
1665     {
1666     localtot=operation(localtot,(*v)[j+roffset]);
1667     }
1668     }
1669     #pragma omp critical
1670     val=operation(val,localtot);
1671     }
1672     freeSampleBuffer(bg);
1673     #ifdef PASO_MPI
1674     double globalValue;
1675     MPI_Allreduce( &val, &globalValue, 1, MPI_DOUBLE, mpiop_type, MPI_COMM_WORLD );
1676     return globalValue;
1677     #else
1678     return val;
1679     #endif
1680     }
1681    
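// The *Worker functions do the actual reductions on non-lazy data: a local reduction via
// algorithm() with the appropriate functor and identity value, followed under PASO_MPI by
// an MPI_Allreduce across processes.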
1682     double
1683 jfenwick 2005 Data::LsupWorker() const
1684     {
1685 phornby 1455 double localValue;
1686 jgs 106 //
1687     // set the initial absolute maximum value to zero
1688 bcumming 751
1689 jgs 147 AbsMax abs_max_func;
1690 bcumming 751 localValue = algorithm(abs_max_func,0);
1691     #ifdef PASO_MPI
1692 phornby 1455 double globalValue;
1693 bcumming 751 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1694     return globalValue;
1695     #else
1696     return localValue;
1697     #endif
1698 jgs 117 }
1699    
1700     double
1701 jfenwick 2005 Data::supWorker() const
1702 jgs 102 {
1703 phornby 1455 double localValue;
1704 jgs 106 //
1705     // set the initial maximum value to min possible double
1706 jgs 147 FMax fmax_func;
1707 bcumming 751 localValue = algorithm(fmax_func,numeric_limits<double>::max()*-1);
1708     #ifdef PASO_MPI
1709 phornby 1455 double globalValue;
1710 bcumming 751 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
1711     return globalValue;
1712     #else
1713     return localValue;
1714     #endif
1715 jgs 102 }
1716    
1717 jgs 106 double
1718 jfenwick 2005 Data::infWorker() const
1719 jgs 102 {
1720 phornby 1455 double localValue;
1721 jgs 106 //
1722     // set the initial minimum value to max possible double
1723 jgs 147 FMin fmin_func;
1724 bcumming 751 localValue = algorithm(fmin_func,numeric_limits<double>::max());
1725     #ifdef PASO_MPI
1726 phornby 1455 double globalValue;
1727 bcumming 751 MPI_Allreduce( &localValue, &globalValue, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
1728     return globalValue;
1729     #else
1730     return localValue;
1731     #endif
1732 jgs 102 }
1733    
1734 bcumming 751 /* TODO */
1735     /* global reduction */
1736 jgs 102 Data
1737 jgs 106 Data::maxval() const
1738 jgs 102 {
1739 jfenwick 2721 MAKELAZYOP(MAXVAL)
1740 jgs 113 //
1741     // set the initial maximum value to min possible double
1742 jgs 147 FMax fmax_func;
1743     return dp_algorithm(fmax_func,numeric_limits<double>::max()*-1);
1744 jgs 102 }
1745    
1746     Data
1747 jgs 106 Data::minval() const
1748 jgs 102 {
1749 jfenwick 2721 MAKELAZYOP(MINVAL)
1750 jgs 113 //
1751     // set the initial minimum value to max possible double
1752 jgs 147 FMin fmin_func;
1753     return dp_algorithm(fmin_func,numeric_limits<double>::max());
1754 jgs 102 }
1755    
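// swapaxes returns a copy in which the data-point axes axis0 and axis1 are interchanged
// (so for rank-2 data, swapaxes(0,1) amounts to transposing each data point).  It requires
// rank >= 2 and two distinct in-range axes; lazy operands yield a lazy SWAP node instead
// of being evaluated immediately.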
1756 jgs 123 Data
1757 gross 804 Data::swapaxes(const int axis0, const int axis1) const
1758 jgs 123 {
1759 gross 804 int axis0_tmp,axis1_tmp;
1760 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1761     DataTypes::ShapeType ev_shape;
1762 gross 800 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1763     // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1764     int rank=getDataPointRank();
1765 gross 804 if (rank<2) {
1766     throw DataException("Error - Data::swapaxes argument must have at least rank 2.");
1767 gross 800 }
1768 gross 804 if (axis0<0 || axis0>rank-1) {
1769     throw DataException("Error - Data::swapaxes: axis0 must be between 0 and rank-1.");
1770     }
1771     if (axis1<0 || axis1>rank-1) {
1772     throw DataException("Error - Data::swapaxes: axis1 must be between 0 and rank-1.");
1773     }
1774     if (axis0 == axis1) {
1775     throw DataException("Error - Data::swapaxes: axis indices must be different.");
1776     }
1777 jfenwick 2496 MAKELAZYOP2(SWAP,axis0,axis1)
1778     if (axis0 > axis1)
1779     {
1780     axis0_tmp=axis1;
1781     axis1_tmp=axis0;
1782 gross 804 }
1783 jfenwick 2496 else
1784     {
1785     axis0_tmp=axis0;
1786     axis1_tmp=axis1;
1787 gross 800 }
1788 jfenwick 2496 for (int i=0; i<rank; i++)
1789     {
1790     if (i == axis0_tmp)
1791     {
1792     ev_shape.push_back(s[axis1_tmp]);
1793     }
1794     else if (i == axis1_tmp)
1795     {
1796     ev_shape.push_back(s[axis0_tmp]);
1797     }
1798     else
1799     {
1800     ev_shape.push_back(s[i]);
1801     }
1802     }
1803 gross 800 Data ev(0.,ev_shape,getFunctionSpace());
1804     ev.typeMatchRight(*this);
1805 gross 804 m_data->swapaxes(ev.m_data.get(), axis0_tmp, axis1_tmp);
1806 gross 800 return ev;
1807 jgs 123 }
1808    
1809     Data
1810 ksteube 775 Data::symmetric() const
1811 jgs 123 {
1812 ksteube 775 // check input
1813 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1814 ksteube 775 if (getDataPointRank()==2) {
1815 ksteube 1312 if(s[0] != s[1])
1816 ksteube 775 throw DataException("Error - Data::symmetric can only be calculated for rank 2 object with equal first and second dimension.");
1817     }
1818     else if (getDataPointRank()==4) {
1819     if(!(s[0] == s[2] && s[1] == s[3]))
1820     throw DataException("Error - Data::symmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1821     }
1822     else {
1823     throw DataException("Error - Data::symmetric can only be calculated for rank 2 or 4 object.");
1824     }
1825 jfenwick 2146 MAKELAZYOP(SYM)
1826 ksteube 775 Data ev(0.,getDataPointShape(),getFunctionSpace());
1827     ev.typeMatchRight(*this);
1828     m_data->symmetric(ev.m_data.get());
1829     return ev;
1830     }
1831    
1832     Data
1833     Data::nonsymmetric() const
1834     {
1835 jfenwick 2146 MAKELAZYOP(NSYM)
1836 ksteube 775 // check input
1837 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1838 ksteube 775 if (getDataPointRank()==2) {
1839 ksteube 1312 if(s[0] != s[1])
1840 ksteube 775 throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 object with equal first and second dimension.");
1841 jfenwick 1796 DataTypes::ShapeType ev_shape;
1842 ksteube 775 ev_shape.push_back(s[0]);
1843     ev_shape.push_back(s[1]);
1844     Data ev(0.,ev_shape,getFunctionSpace());
1845     ev.typeMatchRight(*this);
1846     m_data->nonsymmetric(ev.m_data.get());
1847     return ev;
1848     }
1849     else if (getDataPointRank()==4) {
1850     if(!(s[0] == s[2] && s[1] == s[3]))
1851     throw DataException("Error - Data::nonsymmetric can only be calculated for rank 4 object with dim0==dim2 and dim1==dim3.");
1852 jfenwick 1796 DataTypes::ShapeType ev_shape;
1853 ksteube 775 ev_shape.push_back(s[0]);
1854     ev_shape.push_back(s[1]);
1855     ev_shape.push_back(s[2]);
1856     ev_shape.push_back(s[3]);
1857     Data ev(0.,ev_shape,getFunctionSpace());
1858     ev.typeMatchRight(*this);
1859     m_data->nonsymmetric(ev.m_data.get());
1860     return ev;
1861     }
1862     else {
1863     throw DataException("Error - Data::nonsymmetric can only be calculated for rank 2 or 4 object.");
1864     }
1865     }
1866    
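// trace: judging by the shape handling below, this contracts the two consecutive data-point
// axes at axis_offset and axis_offset+1 (the usual matrix trace for rank-2 data), so rank 2
// yields a scalar, rank 3 a vector and rank 4 a matrix.  Lazy operands yield a lazy TRACE node.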
1867     Data
1868 gross 800 Data::trace(int axis_offset) const
1869 jfenwick 2084 {
1870 jfenwick 2146 MAKELAZYOPOFF(TRACE,axis_offset)
1871 jfenwick 2200 if ((axis_offset<0) || (axis_offset+2>getDataPointRank()))
1872     {
1873     throw DataException("Error - Data::trace, axis_offset must be between 0 and rank-2 inclusive.");
1874     }
1875 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1876 ksteube 775 if (getDataPointRank()==2) {
1877 jfenwick 1796 DataTypes::ShapeType ev_shape;
1878 ksteube 775 Data ev(0.,ev_shape,getFunctionSpace());
1879     ev.typeMatchRight(*this);
1880 gross 800 m_data->trace(ev.m_data.get(), axis_offset);
1881 ksteube 775 return ev;
1882     }
1883     if (getDataPointRank()==3) {
1884 jfenwick 1796 DataTypes::ShapeType ev_shape;
1885 ksteube 775 if (axis_offset==0) {
1886     int s2=s[2];
1887     ev_shape.push_back(s2);
1888     }
1889     else if (axis_offset==1) {
1890     int s0=s[0];
1891     ev_shape.push_back(s0);
1892     }
1893     Data ev(0.,ev_shape,getFunctionSpace());
1894     ev.typeMatchRight(*this);
1895 gross 800 m_data->trace(ev.m_data.get(), axis_offset);
1896 ksteube 775 return ev;
1897     }
1898     if (getDataPointRank()==4) {
1899 jfenwick 1796 DataTypes::ShapeType ev_shape;
1900 ksteube 775 if (axis_offset==0) {
1901     ev_shape.push_back(s[2]);
1902     ev_shape.push_back(s[3]);
1903     }
1904     else if (axis_offset==1) {
1905     ev_shape.push_back(s[0]);
1906     ev_shape.push_back(s[3]);
1907     }
1908     else if (axis_offset==2) {
1909     ev_shape.push_back(s[0]);
1910     ev_shape.push_back(s[1]);
1911     }
1912     Data ev(0.,ev_shape,getFunctionSpace());
1913     ev.typeMatchRight(*this);
1914 gross 800 m_data->trace(ev.m_data.get(), axis_offset);
1915 ksteube 775 return ev;
1916     }
1917     else {
1918 gross 800 throw DataException("Error - Data::trace can only be calculated for rank 2, 3 or 4 object.");
1919 ksteube 775 }
1920     }
1921    
1922     Data
1923     Data::transpose(int axis_offset) const
1924 jfenwick 2005 {
1925 jfenwick 2146 MAKELAZYOPOFF(TRANS,axis_offset)
1926 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1927     DataTypes::ShapeType ev_shape;
1928 ksteube 775 // Here's the equivalent of python s_out=s[axis_offset:]+s[:axis_offset]
1929     // which goes thru all shape vector elements starting with axis_offset (at index=rank wrap around to 0)
1930     int rank=getDataPointRank();
1931     if (axis_offset<0 || axis_offset>rank) {
1932     throw DataException("Error - Data::transpose must have 0 <= axis_offset <= rank.");
1933     }
1934     for (int i=0; i<rank; i++) {
1935 jfenwick 2721
1936 ksteube 775 int index = (axis_offset+i)%rank;
1937     ev_shape.push_back(s[index]); // Append to new shape
1938     }
1939     Data ev(0.,ev_shape,getFunctionSpace());
1940     ev.typeMatchRight(*this);
1941     m_data->transpose(ev.m_data.get(), axis_offset);
1942     return ev;
1943 jgs 123 }
1944    
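// eigenvalues / eigenvalues_and_eigenvectors operate on square rank-2 data points; lazy
// input is resolved on a temporary copy because these calls cannot be deferred.
// eigenvalues() returns a rank-1 object of length s[0]; the second variant also returns
// the eigenvector matrix V and takes a tolerance 'tol'.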
1945 gross 576 Data
1946     Data::eigenvalues() const
1947     {
1948 jfenwick 2005 if (isLazy())
1949     {
1950     Data temp(*this); // to get around the fact that you can't resolve a const Data
1951     temp.resolve();
1952     return temp.eigenvalues();
1953     }
1954 gross 576 // check input
1955 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1956 ksteube 1312 if (getDataPointRank()!=2)
1957 gross 576 throw DataException("Error - Data::eigenvalues can only be calculated for rank 2 object.");
1958 ksteube 1312 if(s[0] != s[1])
1959 gross 576 throw DataException("Error - Data::eigenvalues can only be calculated for object with equal first and second dimension.");
1960     // create return
1961 jfenwick 1796 DataTypes::ShapeType ev_shape(1,s[0]);
1962 gross 576 Data ev(0.,ev_shape,getFunctionSpace());
1963     ev.typeMatchRight(*this);
1964     m_data->eigenvalues(ev.m_data.get());
1965     return ev;
1966     }
1967    
1968 jgs 121 const boost::python::tuple
1969 gross 576 Data::eigenvalues_and_eigenvectors(const double tol) const
1970     {
1971 jfenwick 2005 if (isLazy())
1972     {
1973     Data temp(*this); // to get around the fact that you can't resolve a const Data
1974     temp.resolve();
1975     return temp.eigenvalues_and_eigenvectors(tol);
1976     }
1977 jfenwick 1796 DataTypes::ShapeType s=getDataPointShape();
1978 ksteube 1312 if (getDataPointRank()!=2)
1979 gross 576 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for rank 2 object.");
1980 ksteube 1312 if(s[0] != s[1])
1981 gross 576 throw DataException("Error - Data::eigenvalues and eigenvectors can only be calculated for object with equal first and second dimension.");
1982     // create return
1983 jfenwick 1796 DataTypes::ShapeType ev_shape(1,s[0]);
1984 gross 576 Data ev(0.,ev_shape,getFunctionSpace());
1985     ev.typeMatchRight(*this);
1986 jfenwick 1796 DataTypes::ShapeType V_shape(2,s[0]);
1987 gross 576 Data V(0.,V_shape,getFunctionSpace());
1988     V.typeMatchRight(*this);
1989     m_data->eigenvalues_and_eigenvectors(ev.m_data.get(),V.m_data.get(),tol);
1990     return make_tuple(boost::python::object(ev),boost::python::object(V));
1991     }
1992    
1993     const boost::python::tuple
1994 gross 921 Data::minGlobalDataPoint() const
1995 jgs 121 {
1996 gross 921 // NB: calc_minGlobalDataPoint() had to be split off from minGlobalDataPoint() because
1997 jgs 148 // boost::make_tuple causes an abort (for unknown reasons) if there are OpenMP
1998     // directives in the surrounding function.
1999    
2000     int DataPointNo;
2001 gross 921 int ProcNo;
2002     calc_minGlobalDataPoint(ProcNo,DataPointNo);
2003     return make_tuple(ProcNo,DataPointNo);
2004 jgs 148 }
2005    
2006     void
2007 gross 921 Data::calc_minGlobalDataPoint(int& ProcNo,
2008     int& DataPointNo) const
2009 jgs 148 {
2010 jfenwick 2005 if (isLazy())
2011     {
2012     Data temp(*this); // to get around the fact that you can't resolve a const Data
2013     temp.resolve();
2014     return temp.calc_minGlobalDataPoint(ProcNo,DataPointNo);
2015     }
2016 jgs 148 int i,j;
2017     int lowi=0,lowj=0;
2018     double min=numeric_limits<double>::max();
2019    
2020 jgs 121 Data temp=minval();
2021    
2022     int numSamples=temp.getNumSamples();
2023     int numDPPSample=temp.getNumDataPointsPerSample();
2024    
2025 jgs 148 double next,local_min;
2026 jfenwick 1946 int local_lowi=0,local_lowj=0;
2027 jgs 121
2028 caltinay 2081 #pragma omp parallel firstprivate(local_lowi,local_lowj) private(next,local_min)
2029 jgs 148 {
2030     local_min=min;
2031     #pragma omp for private(i,j) schedule(static)
2032     for (i=0; i<numSamples; i++) {
2033     for (j=0; j<numDPPSample; j++) {
2034 jfenwick 2271 next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
2035 jgs 148 if (next<local_min) {
2036     local_min=next;
2037     local_lowi=i;
2038     local_lowj=j;
2039     }
2040 jgs 121 }
2041     }
2042 jgs 148 #pragma omp critical
2043 jfenwick 2476 if (local_min<min) { // If we found a smaller value than our sentinel
2044 jgs 148 min=local_min;
2045     lowi=local_lowi;
2046     lowj=local_lowj;
2047     }
2048 jgs 121 }
2049    
2050 bcumming 782 #ifdef PASO_MPI
2051 jfenwick 2476 // determine the processor on which the minimum occurs
2052     next = temp.getDataPointRO(lowi,lowj);
2053     int lowProc = 0;
2054     double *globalMins = new double[get_MPISize()+1];
2055     int error;
2056     error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMins, 1, MPI_DOUBLE, 0, get_MPIComm() );
2057 ksteube 1312
2058 jfenwick 2476 if( get_MPIRank()==0 ){
2059     next = globalMins[lowProc];
2060     for( i=1; i<get_MPISize(); i++ )
2061     if( next>globalMins[i] ){
2062     lowProc = i;
2063     next = globalMins[i];
2064     }
2065     }
2066 gross 2492 MPI_Bcast( &lowProc, 1, MPI_INT, 0, get_MPIComm() );
2067 jfenwick 2653 DataPointNo = lowj + lowi * numDPPSample;
2068     MPI_Bcast(&DataPointNo, 1, MPI_INT, lowProc, get_MPIComm() );
2069 jfenwick 2476 delete [] globalMins;
2070     ProcNo = lowProc;
2071 bcumming 790 #else
2072 jfenwick 2476 ProcNo = 0;
2073 jfenwick 2653 DataPointNo = lowj + lowi * numDPPSample;
2074 bcumming 782 #endif
2075 jgs 121 }
2076    
2077 jfenwick 2476
2078     const boost::python::tuple
2079     Data::maxGlobalDataPoint() const
2080     {
2081     int DataPointNo;
2082     int ProcNo;
2083     calc_maxGlobalDataPoint(ProcNo,DataPointNo);
2084     return make_tuple(ProcNo,DataPointNo);
2085     }
2086    
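// calc_maxGlobalDataPoint mirrors calc_minGlobalDataPoint above: it locates the largest
// value of maxval() over all samples (in parallel with OpenMP) and, under PASO_MPI, over
// all processes, returning the owning process number and the flat data-point number.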
2087 jgs 104 void
2088 jfenwick 2476 Data::calc_maxGlobalDataPoint(int& ProcNo,
2089     int& DataPointNo) const
2090     {
2091     if (isLazy())
2092     {
2093     Data temp(*this); // to get around the fact that you can't resolve a const Data
2094     temp.resolve();
2095     return temp.calc_maxGlobalDataPoint(ProcNo,DataPointNo);
2096     }
2097     int i,j;
2098     int highi=0,highj=0;
2099     //-------------
2100 jfenwick 2653 double max= -numeric_limits<double>::max();
2101 jfenwick 2476
2102     Data temp=maxval();
2103    
2104     int numSamples=temp.getNumSamples();
2105     int numDPPSample=temp.getNumDataPointsPerSample();
2106    
2107     double next,local_max;
2108     int local_highi=0,local_highj=0;
2109    
2110     #pragma omp parallel firstprivate(local_highi,local_highj) private(next,local_max)
2111     {
2112     local_max=max;
2113     #pragma omp for private(i,j) schedule(static)
2114     for (i=0; i<numSamples; i++) {
2115     for (j=0; j<numDPPSample; j++) {
2116     next=temp.getDataAtOffsetRO(temp.getDataOffset(i,j));
2117     if (next>local_max) {
2118     local_max=next;
2119     local_highi=i;
2120     local_highj=j;
2121     }
2122     }
2123     }
2124     #pragma omp critical
2125     if (local_max>max) { // If we found a larger value than our sentinel
2126     max=local_max;
2127     highi=local_highi;
2128     highj=local_highj;
2129     }
2130     }
2131     #ifdef PASO_MPI
2132     // determine the processor on which the maximum occurs
2133     next = temp.getDataPointRO(highi,highj);
2134     int highProc = 0;
2135     double *globalMaxs = new double[get_MPISize()+1];
2136     int error;
2137     error = MPI_Gather ( &next, 1, MPI_DOUBLE, globalMaxs, 1, MPI_DOUBLE, 0, get_MPIComm() );
2138     if( get_MPIRank()==0 ){
2139 jfenwick 2653 next = globalMaxs[highProc];
2140     for( i=1; i<get_MPISize(); i++ )
2141     {
2142     if( next<globalMaxs[i] )
2143     {
2144 jfenwick 2476 highProc = i;
2145     next = globalMaxs[i];
2146     }
2147 jfenwick 2653 }
2148 jfenwick 2476 }
2149 gross 2492 MPI_Bcast( &highProc, 1, MPI_INT, 0, get_MPIComm() );
2150 jfenwick 2653 DataPointNo = highj + highi * numDPPSample;
2151     MPI_Bcast(&DataPointNo, 1, MPI_INT, highProc, get_MPIComm() );
2152    
2153 jfenwick 2476 delete [] globalMaxs;
2154     ProcNo = highProc;
2155     #else
2156     ProcNo = 0;
2157 jfenwick 2653 DataPointNo = highj + highi * numDPPSample;
2158 jfenwick 2476 #endif
2159     }
2160    
2161     void
2162 jgs 104 Data::saveDX(std::string fileName) const
2163     {
2164 jfenwick 1803 if (isEmpty())
2165     {
2166     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2167     }
2168 jfenwick 2005 if (isLazy())
2169     {
2170     Data temp(*this); // to get around the fact that you can't resolve a const Data
2171     temp.resolve();
2172     temp.saveDX(fileName);
2173     return;
2174     }
2175 jgs 153 boost::python::dict args;
2176     args["data"]=boost::python::object(this);
2177 jfenwick 1872 getDomain()->saveDX(fileName,args);
2178 jgs 104 return;
2179     }
2180    
2181 jgs 110 void
2182     Data::saveVTK(std::string fileName) const
2183     {
2184 jfenwick 1803 if (isEmpty())
2185     {
2186     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2187     }
2188 jfenwick 2005 if (isLazy())
2189     {
2190     Data temp(*this); // to get around the fact that you can't resolve a const Data
2191     temp.resolve();
2192     temp.saveVTK(fileName);
2193     return;
2194     }
2195 jgs 153 boost::python::dict args;
2196     args["data"]=boost::python::object(this);
2197 gross 2421 getDomain()->saveVTK(fileName,args,"","");
2198 jgs 110 return;
2199     }
2200    
2201 jfenwick 2635
2202    
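// In-place arithmetic operators: each checks isProtected(), short-circuits to a lazy node
// via MAKELAZYBINSELF when either operand is lazy (or AUTOLAZY applies), and otherwise
// calls exclusiveWrite() so that shared data is copied before the element-wise binaryOp.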
2203 jgs 102 Data&
2204     Data::operator+=(const Data& right)
2205     {
2206 gross 783 if (isProtected()) {
2207     throw DataException("Error - attempt to update protected Data object.");
2208     }
2209 jfenwick 2146 MAKELAZYBINSELF(right,ADD) // for lazy + is equivalent to +=
2210 jfenwick 2271 exclusiveWrite(); // Since Lazy data does not modify its leaves we only need to worry here
2211 jfenwick 2146 binaryOp(right,plus<double>());
2212     return (*this);
2213 jgs 94 }
2214    
2215 jgs 102 Data&
2216     Data::operator+=(const boost::python::object& right)
2217 jgs 94 {
2218 jfenwick 2005 if (isProtected()) {
2219     throw DataException("Error - attempt to update protected Data object.");
2220     }
2221 gross 854 Data tmp(right,getFunctionSpace(),false);
2222 jfenwick 2271 (*this)+=tmp;
2223     return *this;
2224 jgs 94 }
2225 jfenwick 2005
2226     // Note: operator= shares the underlying data (like the copy constructor); later in-place writes call exclusiveWrite() first, so the shared data is not clobbered.
2227 ksteube 1312 Data&
2228     Data::operator=(const Data& other)
2229     {
2230 jfenwick 2271 m_protected=false; // since any changes should be caught by exclusiveWrite();
2231     // m_data=other.m_data;
2232     set_m_data(other.m_data);
2233 ksteube 1312 return (*this);
2234     }
2235 jgs 94
2236 jgs 102 Data&
2237     Data::operator-=(const Data& right)
2238 jgs 94 {
2239 gross 783 if (isProtected()) {
2240     throw DataException("Error - attempt to update protected Data object.");
2241     }
2242 jfenwick 2146 MAKELAZYBINSELF(right,SUB)
2243 jfenwick 2271 exclusiveWrite();
2244 jfenwick 2146 binaryOp(right,minus<double>());
2245     return (*this);
2246 jgs 94 }
2247    
2248 jgs 102 Data&
2249     Data::operator-=(const boost::python::object& right)
2250 jgs 94 {
2251 jfenwick 2005 if (isProtected()) {
2252     throw DataException("Error - attempt to update protected Data object.");
2253     }
2254 gross 854 Data tmp(right,getFunctionSpace(),false);
2255 jfenwick 2271 (*this)-=tmp;
2256 jfenwick 2146 return (*this);
2257 jgs 94 }
2258    
2259 jgs 102 Data&
2260     Data::operator*=(const Data& right)
2261 jgs 94 {
2262 gross 783 if (isProtected()) {
2263     throw DataException("Error - attempt to update protected Data object.");
2264     }
2265 jfenwick 2146 MAKELAZYBINSELF(right,MUL)
2266 jfenwick 2271 exclusiveWrite();
2267 jfenwick 2146 binaryOp(right,multiplies<double>());
2268     return (*this);
2269 jgs 94 }
2270    
2271 jgs 102 Data&
2272     Data::operator*=(const boost::python::object& right)
2273 jfenwick 2005 {
2274     if (isProtected()) {
2275     throw DataException("Error - attempt to update protected Data object.");
2276     }
2277 gross 854 Data tmp(right,getFunctionSpace(),false);
2278 jfenwick 2271 (*this)*=tmp;
2279 jfenwick 2146 return (*this);
2280 jgs 94 }
2281    
2282 jgs 102 Data&
2283     Data::operator/=(const Data& right)
2284 jgs 94 {
2285 gross 783 if (isProtected()) {
2286     throw DataException("Error - attempt to update protected Data object.");
2287     }
2288 jfenwick 2146 MAKELAZYBINSELF(right,DIV)
2289 jfenwick 2271 exclusiveWrite();
2290 jfenwick 2146 binaryOp(right,divides<double>());
2291     return (*this);
2292 jgs 94 }
2293    
2294 jgs 102 Data&
2295     Data::operator/=(const boost::python::object& right)
2296 jgs 94 {
2297 jfenwick 2005 if (isProtected()) {
2298     throw DataException("Error - attempt to update protected Data object.");
2299     }
2300 gross 854 Data tmp(right,getFunctionSpace(),false);
2301 jfenwick 2271 (*this)/=tmp;
2302 jfenwick 2146 return (*this);
2303 jgs 94 }
2304    
2305 jgs 102 Data
2306 gross 699 Data::rpowO(const boost::python::object& left) const
2307     {
2308     Data left_d(left,*this);
2309     return left_d.powD(*this);
2310     }
2311    
2312     Data
2313 jgs 102 Data::powO(const boost::python::object& right) const
2314 jgs 94 {
2315 gross 854 Data tmp(right,getFunctionSpace(),false);
2316     return powD(tmp);
2317 jgs 94 }
2318    
2319 jgs 102 Data
2320     Data::powD(const Data& right) const
2321 jgs 94 {
2322 jfenwick 2146 MAKELAZYBIN(right,POW)
2323 matt 1350 return C_TensorBinaryOperation<double (*)(double, double)>(*this, right, ::pow);
2324 jgs 94 }
2325    
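// Binary operators in namespace escript: each tries the lazy path first (MAKELAZYBIN2) and
// otherwise builds an expanded result with C_TensorBinaryOperation; the
// boost::python::object overloads first convert the Python operand into a Data object on
// the other operand's FunctionSpace.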
2326     //
2327 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2328 jgs 102 Data
2329     escript::operator+(const Data& left, const Data& right)
2330 jgs 94 {
2331 jfenwick 2146 MAKELAZYBIN2(left,right,ADD)
2332 matt 1327 return C_TensorBinaryOperation(left, right, plus<double>());
2333 jgs 94 }
2334    
2335     //
2336 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2337 jgs 102 Data
2338     escript::operator-(const Data& left, const Data& right)
2339 jgs 94 {
2340 jfenwick 2146 MAKELAZYBIN2(left,right,SUB)
2341 matt 1327 return C_TensorBinaryOperation(left, right, minus<double>());
2342 jgs 94 }
2343    
2344     //
2345 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2346 jgs 102 Data
2347     escript::operator*(const Data& left, const Data& right)
2348 jgs 94 {
2349 jfenwick 2146 MAKELAZYBIN2(left,right,MUL)
2350 matt 1327 return C_TensorBinaryOperation(left, right, multiplies<double>());
2351 jgs 94 }
2352    
2353     //
2354 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2355 jgs 102 Data
2356     escript::operator/(const Data& left, const Data& right)
2357 jgs 94 {
2358 jfenwick 2146 MAKELAZYBIN2(left,right,DIV)
2359 matt 1327 return C_TensorBinaryOperation(left, right, divides<double>());
2360 jgs 94 }
2361    
2362     //
2363 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2364 jgs 102 Data
2365     escript::operator+(const Data& left, const boost::python::object& right)
2366 jgs 94 {
2367 jfenwick 2146 Data tmp(right,left.getFunctionSpace(),false);
2368     MAKELAZYBIN2(left,tmp,ADD)
2369     return left+tmp;
2370 jgs 94 }
2371    
2372     //
2373 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2374 jgs 102 Data
2375     escript::operator-(const Data& left, const boost::python::object& right)
2376 jgs 94 {
2377 jfenwick 2146 Data tmp(right,left.getFunctionSpace(),false);
2378     MAKELAZYBIN2(left,tmp,SUB)
2379     return left-tmp;
2380 jgs 94 }
2381    
2382     //
2383 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2384 jgs 102 Data
2385     escript::operator*(const Data& left, const boost::python::object& right)
2386 jgs 94 {
2387 jfenwick 2146 Data tmp(right,left.getFunctionSpace(),false);
2388     MAKELAZYBIN2(left,tmp,MUL)
2389     return left*tmp;
2390 jgs 94 }
2391    
2392     //
2393 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2394 jgs 102 Data
2395     escript::operator/(const Data& left, const boost::python::object& right)
2396 jgs 94 {
2397 jfenwick 2146 Data tmp(right,left.getFunctionSpace(),false);
2398     MAKELAZYBIN2(left,tmp,DIV)
2399     return left/tmp;
2400 jgs 94 }
2401    
2402     //
2403 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2404 jgs 102 Data
2405     escript::operator+(const boost::python::object& left, const Data& right)
2406 jgs 94 {
2407 jfenwick 2146 Data tmp(left,right.getFunctionSpace(),false);
2408     MAKELAZYBIN2(tmp,right,ADD)
2409     return tmp+right;
2410 jgs 94 }
2411    
2412     //
2413 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2414 jgs 102 Data
2415     escript::operator-(const boost::python::object& left, const Data& right)
2416 jgs 94 {
2417 jfenwick 2146 Data tmp(left,right.getFunctionSpace(),false);
2418     MAKELAZYBIN2(tmp,right,SUB)
2419     return tmp-right;
2420 jgs 94 }
2421    
2422     //
2423 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2424 jgs 102 Data
2425     escript::operator*(const boost::python::object& left, const Data& right)
2426 jgs 94 {
2427 jfenwick 2146 Data tmp(left,right.getFunctionSpace(),false);
2428     MAKELAZYBIN2(tmp,right,MUL)
2429     return tmp*right;
2430 jgs 94 }
2431    
2432     //
2433 jgs 123 // NOTE: It is essential to specify the namespace this operator belongs to
2434 jgs 102 Data
2435     escript::operator/(const boost::python::object& left, const Data& right)
2436 jgs 94 {
2437 jfenwick 2146 Data tmp(left,right.getFunctionSpace(),false);
2438     MAKELAZYBIN2(tmp,right,DIV)
2439     return tmp/right;
2440 jgs 94 }
2441    
2442 jgs 102
2443 bcumming 751 /* TODO */
2444     /* global reduction */
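// getItem translates a Python slicing key into a DataTypes::RegionType (one range per
// data-point axis) and returns the corresponding slice; getSlice builds the sliced Data
// via the slicing constructor Data(*this,region).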
2445 jgs 102 Data
2446 ksteube 1312 Data::getItem(const boost::python::object& key) const
2447 jgs 94 {
2448    
2449 jfenwick 1796 DataTypes::RegionType slice_region=DataTypes::getSliceRegion(getDataPointShape(),key);
2450 jgs 94
2451 jfenwick 1796 if (slice_region.size()!=getDataPointRank()) {
2452 jgs 102 throw DataException("Error - slice size does not match Data rank.");
2453 jgs 94 }
2454    
2455 jgs 102 return getSlice(slice_region);
2456 jgs 94 }
2457    
2458 bcumming 751 /* TODO */
2459     /* global reduction */
2460 jgs 94 Data
2461 jfenwick 1796 Data::getSlice(const DataTypes::RegionType& region) const
2462 jgs 94 {
2463 jgs 102 return Data(*this,region);
2464 jgs 94 }
2465    
2466 bcumming 751 /* TODO */
2467     /* global reduction */
2468 jgs 94 void
2469 jgs 102 Data::setItemO(const boost::python::object& key,
2470     const boost::python::object& value)
2471 jgs