
Contents of /branches/complex/escriptcore/src/Data.h



Revision 5878
Wed Jan 20 07:23:48 2016 UTC by jfenwick
isComplex and conjugate methods added to Data.
We don't actually store complex values yet.

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2016 by The University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development 2012-2013 by School of Earth Sciences
13 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 *
15 *****************************************************************************/
16
17
18 /** \file Data.h */
19
20 #ifndef DATA_H
21 #define DATA_H
22 #include "system_dep.h"
23
24 #include "DataTypes.h"
25 #include "DataAbstract.h"
26 #include "DataAlgorithm.h"
27 #include "FunctionSpace.h"
28 #include "BinaryOp.h"
29 #include "UnaryOp.h"
30 #include "DataException.h"
31
32 #ifdef _OPENMP
33 #include <omp.h>
34 #endif
35
36 #include "esysUtils/Esys_MPI.h"
37 #include <string>
38 #include <algorithm>
39 #include <sstream>
40
41 #include <boost/shared_ptr.hpp>
42 #include <boost/python/object.hpp>
43 #include <boost/python/tuple.hpp>
44 #include <boost/math/special_functions/bessel.hpp>
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
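A minimal sketch of the reference pitfall described above (the variable names are
illustrative only, not part of the API):

\code
escript::Data d(1.0, DataTypes::ShapeType(), FunctionSpace(), false);
const DataTypes::ShapeType& shp = d.getDataPointShape(); // reference into the current DataAbstract
d.expand();                       // may replace the underlying DataAbstract
// 'shp' must not be used now; re-fetch the shape instead:
const DataTypes::ShapeType& shp2 = d.getDataPointShape();
\endcode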
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData to "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::FloatVectorType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which copies data from a wrapped array.
164
165 \param w - Input - Input data.
166 \param what - Input - A description of what this data represents.
167 \param expanded - Input - Flag, if true fill the entire container with
168 the value. Otherwise a more efficient storage
169 mechanism will be used.
170 */
171 ESCRIPT_DLL_API
172 Data(const WrappedArray& w, const FunctionSpace& what,
173 bool expanded=false);
174
175
176 /**
177 \brief
178 Constructor which creates a DataConstant.
179 Copies data from any object that can be treated like a python array/sequence.
180 All other parameters are copied from other.
181
182 \param value - Input - Input data.
183 \param other - Input - contains all other parameters.
184 */
185 ESCRIPT_DLL_API
186 Data(const boost::python::object& value,
187 const Data& other);
188
189 /**
190 \brief
191 Constructor which creates a DataConstant of "shape" with constant value.
192 */
193 ESCRIPT_DLL_API
194 Data(double value,
195 const boost::python::tuple& shape=boost::python::make_tuple(),
196 const FunctionSpace& what=FunctionSpace(),
197 bool expanded=false);
198
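/*
   Example (a sketch only): constructing Data objects with the constructors above.
   A default-constructed FunctionSpace is used here for brevity; real code would
   normally obtain one from a domain.

   \code
   using namespace escript;
   Data a;                                        // empty Data
   Data b(2.5, DataTypes::ShapeType());           // constant scalar
   DataTypes::ShapeType shape;
   shape.push_back(3);
   Data c(0.0, shape, FunctionSpace(), true);     // expanded rank-1 Data filled with 0.0
   \endcode
*/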
199 /**
200 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
201 Once you have passed the pointer, do not delete it.
202 */
203 ESCRIPT_DLL_API
204 explicit Data(DataAbstract* underlyingdata);
205
206 /**
207 \brief Create a Data based on the supplied DataAbstract
208 */
209 ESCRIPT_DLL_API
210 explicit Data(DataAbstract_ptr underlyingdata);
211
212 /**
213 \brief
214 Destructor
215 */
216 ESCRIPT_DLL_API
217 ~Data();
218
219 /**
220 \brief Make this object a deep copy of "other".
221 */
222 ESCRIPT_DLL_API
223 void
224 copy(const Data& other);
225
226 /**
227 \brief Return a pointer to a deep copy of this object.
228 */
229 ESCRIPT_DLL_API
230 Data
231 copySelf() const;
232
233
234 /**
235 \brief produce a delayed evaluation version of this Data.
236 */
237 ESCRIPT_DLL_API
238 Data
239 delay();
240
241 /**
242 \brief convert the current data into lazy data.
243 */
244 ESCRIPT_DLL_API
245 void
246 delaySelf();
247
248
249 /**
250 Member access methods.
251 */
252
253 /**
254 \brief
255 switches on update protection
256
257 */
258 ESCRIPT_DLL_API
259 void
260 setProtection();
261
262 /**
263 \brief
264 Returns true, if the data object is protected against update
265
266 */
267 ESCRIPT_DLL_API
268 bool
269 isProtected() const;
270
271
272 /**
273 \brief
274 Return the value of a data point as a python tuple.
275 */
276 ESCRIPT_DLL_API
277 const boost::python::object
278 getValueOfDataPointAsTuple(int dataPointNo);
279
280 /**
281 \brief
282 sets the values of a data-point from a python object on this process
283 */
284 ESCRIPT_DLL_API
285 void
286 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
287
288 /**
289 \brief
290 sets the values of a data-point from an array-like object on this process
291 */
292 ESCRIPT_DLL_API
293 void
294 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
295
296 /**
297 \brief
298 sets the values of a data-point on this process
299 */
300 ESCRIPT_DLL_API
301 void
302 setValueOfDataPoint(int dataPointNo, const double);
303
304 /**
305 \brief Return a data point across all processors as a python tuple.
306 */
307 ESCRIPT_DLL_API
308 const boost::python::object
309 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
310
311
312 /**
313 \brief Set the value of a global data point
314 */
315 ESCRIPT_DLL_API
316 void
317 setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
318
319 /**
320 \brief
321 Return the tag number associated with the given data-point.
322
323 */
324 ESCRIPT_DLL_API
325 int
326 getTagNumber(int dpno);
327
328
329 /**
330 \brief
331 Write the data as a string. For large amounts of data, a summary is printed.
332 */
333 ESCRIPT_DLL_API
334 std::string
335 toString() const;
336
337 /**
338 \brief
339 Whatever the current Data type make this into a DataExpanded.
340 */
341 ESCRIPT_DLL_API
342 void
343 expand();
344
345 /**
346 \brief
347 If possible convert this Data to DataTagged. This will only allow
348 Constant data to be converted to tagged. An attempt to convert
349 Expanded data to tagged will throw an exception.
350 */
351 ESCRIPT_DLL_API
352 void
353 tag();
354
355 /**
356 \brief If this data is lazy, then convert it to ready data.
357 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
358 */
359 ESCRIPT_DLL_API
360 void
361 resolve();
362
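/*
   Illustrative use of the lazy-evaluation hooks above (a sketch, not a prescription;
   whether intermediate results stay lazy depends on the operands and configuration):

   \code
   d.delaySelf();            // switch this object to delayed evaluation
   Data e = d + d;           // typically builds a lazy expression rather than computing now
   e.resolve();              // force evaluation; the resulting ready type depends on the expression
   \endcode
*/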
363 /**
364 \brief Returns true if data contains NaN.
365 \warning This is dependent on the ability to reliably detect NaNs on your compiler.
366 See the nancheck function in LocalOps for details.
367 */
368 ESCRIPT_DLL_API
369 bool
370 hasNaN();
371
372 /**
373 \brief replaces all NaN values with value
374 */
375 ESCRIPT_DLL_API
376 void
377 replaceNaN(double value);
378
379 /**
380 \brief Ensures data is ready for write access.
381 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
382 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
383 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
384 Doing so might introduce additional sharing.
385 */
386 ESCRIPT_DLL_API
387 void
388 requireWrite();
389
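/*
   Intended call pattern for requireWrite()/getSampleDataRW() (a sketch; 'd' stands
   for any Data object):

   \code
   d.requireWrite();                          // serial section: resolve + ensure exclusive access
   #pragma omp parallel for
   for (int s=0; s<d.getNumSamples(); ++s) {
       double* p = d.getSampleDataRW(s);      // safe after requireWrite()
       // ... write the sample's values via p ...
   }
   \endcode
*/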
390 /**
391 \brief
392 Return true if this Data is expanded.
393 \note To determine if a sample will contain separate values for each datapoint, use actsExpanded instead.
394 */
395 ESCRIPT_DLL_API
396 bool
397 isExpanded() const;
398
399 /**
400 \brief
401 Return true if this Data is expanded or resolves to expanded.
402 That is, if it has a separate value for each datapoint in the sample.
403 */
404 ESCRIPT_DLL_API
405 bool
406 actsExpanded() const;
407
408
409 /**
410 \brief
411 Return true if this Data is tagged.
412 */
413 ESCRIPT_DLL_API
414 bool
415 isTagged() const;
416
417 /**
418 \brief
419 Return true if this Data is constant.
420 */
421 ESCRIPT_DLL_API
422 bool
423 isConstant() const;
424
425 /**
426 \brief Return true if this Data is lazy.
427 */
428 ESCRIPT_DLL_API
429 bool
430 isLazy() const;
431
432 /**
433 \brief Return true if this data is ready.
434 */
435 ESCRIPT_DLL_API
436 bool
437 isReady() const;
438
439 /**
440 \brief
441 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
442 contains datapoints.
443 */
444 ESCRIPT_DLL_API
445 bool
446 isEmpty() const;
447
448 /**
449 \brief
450 True if components of this data are stored as complex
451 */
452 ESCRIPT_DLL_API
453 bool
454 isComplex() const;
455
456 /**
457 \brief
458 Return the function space.
459 */
460 ESCRIPT_DLL_API
461 inline
462 const FunctionSpace&
463 getFunctionSpace() const
464 {
465 return m_data->getFunctionSpace();
466 }
467
468 /**
469 \brief
470 Return the domain.
471 */
472 ESCRIPT_DLL_API
473 inline
474 // const AbstractDomain&
475 const_Domain_ptr
476 getDomain() const
477 {
478 return getFunctionSpace().getDomain();
479 }
480
481
482 /**
483 \brief
484 Return the domain.
485 TODO: For internal use only. This should be removed.
486 */
487 ESCRIPT_DLL_API
488 inline
489 // const AbstractDomain&
490 Domain_ptr
491 getDomainPython() const
492 {
493 return getFunctionSpace().getDomainPython();
494 }
495
496 /**
497 \brief
498 Return the rank of the point data.
499 */
500 ESCRIPT_DLL_API
501 inline
502 unsigned int
503 getDataPointRank() const
504 {
505 return m_data->getRank();
506 }
507
508 /**
509 \brief
510 Return the number of data points
511 */
512 ESCRIPT_DLL_API
513 inline
514 int
515 getNumDataPoints() const
516 {
517 return getNumSamples() * getNumDataPointsPerSample();
518 }
519 /**
520 \brief
521 Return the number of samples.
522 */
523 ESCRIPT_DLL_API
524 inline
525 int
526 getNumSamples() const
527 {
528 return m_data->getNumSamples();
529 }
530
531 /**
532 \brief
533 Return the number of data points per sample.
534 */
535 ESCRIPT_DLL_API
536 inline
537 int
538 getNumDataPointsPerSample() const
539 {
540 return m_data->getNumDPPSample();
541 }
542
543 /**
544 \brief
545 Returns true if the number of data points per sample and the number of
546 samples match the respective argument. DataEmpty always returns true.
547 */
548 ESCRIPT_DLL_API
549 inline
550 bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
551 {
552 return (isEmpty() ||
553 (numDataPointsPerSample==getNumDataPointsPerSample() && numSamples==getNumSamples()));
554 }
555
556 /**
557 \brief
558 Returns true if the shape matches the vector (dimensions[0],...,
559 dimensions[rank-1]). DataEmpty always returns true.
560 */
561 inline
562 bool isDataPointShapeEqual(int rank, const int* dimensions) const
563 {
564 if (isEmpty())
565 return true;
566 const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
567 return (getDataPointShape()==givenShape);
568 }
569
570 /**
571 \brief
572 Return the number of values in the shape for this object.
573 */
574 ESCRIPT_DLL_API
575 int
576 getNoValues() const
577 {
578 return m_data->getNoValues();
579 }
580
581
582 /**
583 \brief
584 dumps the object into a netCDF file
585 */
586 ESCRIPT_DLL_API
587 void
588 dump(const std::string fileName) const;
589
590 /**
591 \brief returns the values of the object as a list of tuples (one for each datapoint).
592
593 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
594 If false, the result is a list of scalars [1, 2, ...]
595 */
596 ESCRIPT_DLL_API
597 const boost::python::object
598 toListOfTuples(bool scalarastuple=true);
599
600
601 /**
602 \brief
603 Return the sample data for the given sample no.
604 Please do not use this unless you NEED to access samples individually
605 \param sampleNo - Input - the given sample no.
606 \return pointer to the sample data.
607 */
608 ESCRIPT_DLL_API
609 inline
610 const DataAbstract::ValueType::value_type*
611 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const;
612
613
614 /**
615 \brief
616 Return the sample data for the given sample no.
617 Please do not use this unless you NEED to access samples individually
618 \param sampleNo - Input - the given sample no.
619 \return pointer to the sample data.
620 */
621 ESCRIPT_DLL_API
622 inline
623 DataAbstract::ValueType::value_type*
624 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
625
626
627 /**
628 \brief
629 Return a pointer to the beginning of the underlying data
630 \warning please avoid using this method since it by-passes possible lazy improvements. May be removed without notice.
631 \return pointer to the data.
632 */
633 ESCRIPT_DLL_API
634 inline
635 const DataAbstract::ValueType::value_type*
636 getDataRO() const;
637
638 /**
639 \brief
640 Return the sample data for the given tag. If an attempt is made to
641 access data that isn't tagged an exception will be thrown.
642 \param tag - Input - the tag key.
643 */
644 ESCRIPT_DLL_API
645 inline
646 DataAbstract::ValueType::value_type*
647 getSampleDataByTag(int tag)
648 {
649 return m_data->getSampleDataByTag(tag);
650 }
651
652 /**
653 \brief
654 Return a reference into the DataVector which points to the specified data point.
655 \param sampleNo - Input -
656 \param dataPointNo - Input -
657 */
658 ESCRIPT_DLL_API
659 DataTypes::FloatVectorType::const_reference
660 getDataPointRO(int sampleNo, int dataPointNo);
661
662 /**
663 \brief
664 Return a reference into the DataVector which points to the specified data point.
665 \param sampleNo - Input -
666 \param dataPointNo - Input -
667 */
668 ESCRIPT_DLL_API
669 DataTypes::FloatVectorType::reference
670 getDataPointRW(int sampleNo, int dataPointNo);
671
672
673
674 /**
675 \brief
676 Return the offset for the given sample and point within the sample
677 */
678 ESCRIPT_DLL_API
679 inline
680 DataTypes::FloatVectorType::size_type
681 getDataOffset(int sampleNo,
682 int dataPointNo)
683 {
684 return m_data->getPointOffset(sampleNo,dataPointNo);
685 }
686
687 /**
688 \brief
689 Return a reference to the data point shape.
690 */
691 ESCRIPT_DLL_API
692 inline
693 const DataTypes::ShapeType&
694 getDataPointShape() const
695 {
696 return m_data->getShape();
697 }
698
699 /**
700 \brief
701 Return the data point shape as a tuple of integers.
702 */
703 ESCRIPT_DLL_API
704 const boost::python::tuple
705 getShapeTuple() const;
706
707 /**
708 \brief
709 Return the size of the data point. It is the product of the
710 data point shape dimensions.
711 */
712 ESCRIPT_DLL_API
713 int
714 getDataPointSize() const;
715
716 /**
717 \brief
718 Return the number of doubles stored for this Data.
719 */
720 ESCRIPT_DLL_API
721 DataTypes::FloatVectorType::size_type
722 getLength() const;
723
724 /**
725 \brief Return true if this object contains no samples.
726 This is not the same as isEmpty()
727 */
728 ESCRIPT_DLL_API
729 bool
730 hasNoSamples() const
731 {
732 return getLength()==0;
733 }
734
735 /**
736 \brief
737 Assign the given value to the tag associated with name. Implicitly converts this
738 object to type DataTagged. Throws an exception if this object
739 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
740 \param name - Input - name of tag.
741 \param value - Input - Value to associate with given key.
742 */
743 ESCRIPT_DLL_API
744 void
745 setTaggedValueByName(std::string name,
746 const boost::python::object& value);
747
748 /**
749 \brief
750 Assign the given value to the tag. Implicitly converts this
751 object to type DataTagged if it is constant.
752
753 \param tagKey - Input - Integer key.
754 \param value - Input - Value to associate with given key.
755 *
756 */
757 ESCRIPT_DLL_API
758 void
759 setTaggedValue(int tagKey,
760 const boost::python::object& value);
761
762 /**
763 \brief
764 Assign the given value to the tag. Implicitly converts this
765 object to type DataTagged if it is constant.
766
767 \param tagKey - Input - Integer key.
768 \param pointshape - Input - The shape of the value parameter
769 \param value - Input - Value to associate with given key.
770 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
771 */
772 ESCRIPT_DLL_API
773 void
774 setTaggedValueFromCPP(int tagKey,
775 const DataTypes::ShapeType& pointshape,
776 const DataTypes::FloatVectorType& value,
777 int dataOffset=0);
778
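/*
   Hedged example of tagging from C++ (the tag key and value are illustrative, and a
   std::vector-like (size, value) constructor is assumed for FloatVectorType):

   \code
   d.tag();                                   // convert constant data to tagged
   DataTypes::ShapeType scalarShape;          // empty shape => scalar data points
   DataTypes::FloatVectorType v(1, 42.0);     // single value (assumed constructor)
   d.setTaggedValueFromCPP(1, scalarShape, v);
   \endcode
*/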
779
780
781 /**
782 \brief
783 Copy other Data object into this Data object where mask is positive.
784 */
785 ESCRIPT_DLL_API
786 void
787 copyWithMask(const Data& other,
788 const Data& mask);
789
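/*
   Typical masked update (sketch): overwrite 'd' with 'other' wherever 'other' is positive.

   \code
   Data mask = other.wherePositive();   // 1 where other > 0, 0 elsewhere
   d.copyWithMask(other, mask);
   \endcode
*/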
790 /**
791 Data object operation methods and operators.
792 */
793
794 /**
795 \brief
796 set all values to zero
797 *
798 */
799 ESCRIPT_DLL_API
800 void
801 setToZero();
802
803 /**
804 \brief
805 Interpolates this onto the given functionspace and returns
806 the result as a Data object.
807 *
808 */
809 ESCRIPT_DLL_API
810 Data
811 interpolate(const FunctionSpace& functionspace) const;
812
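/*
   Typical interpolation (sketch; 'otherFS' stands for a FunctionSpace on the same
   domain that this Data can be interpolated to):

   \code
   if (d.probeInterpolation(otherFS)) {
       Data di = d.interpolate(otherFS);
   }
   \endcode
*/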
813 ESCRIPT_DLL_API
814 Data
815 interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
816 double undef, Data& B, double Bmin, double Bstep, Data& C,
817 double Cmin, double Cstep, bool check_boundaries);
818
819 ESCRIPT_DLL_API
820 Data
821 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
822 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
823
824 ESCRIPT_DLL_API
825 Data
826 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
827 double undef,bool check_boundaries);
828
829
830 ESCRIPT_DLL_API
831 Data
832 interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
833 Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
834
835
836 ESCRIPT_DLL_API
837 Data
838 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
839 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
840
841 ESCRIPT_DLL_API
842 Data
843 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
844 double undef,bool check_boundaries);
845
846 ESCRIPT_DLL_API
847 Data
848 nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
849
850 ESCRIPT_DLL_API
851 Data
852 nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
853
854 /**
855 \brief
856 Calculates the gradient of the data at the data points of functionspace.
857 If functionspace is not present the function space of Function(getDomain()) is used.
858 *
859 */
860 ESCRIPT_DLL_API
861 Data
862 gradOn(const FunctionSpace& functionspace) const;
863
864 ESCRIPT_DLL_API
865 Data
866 grad() const;
867
868 /**
869 \brief
870 Calculate the integral over the function space domain as a python tuple.
871 */
872 ESCRIPT_DLL_API
873 boost::python::object
874 integrateToTuple_const() const;
875
876
877 /**
878 \brief
879 Calculate the integral over the function space domain as a python tuple.
880 */
881 ESCRIPT_DLL_API
882 boost::python::object
883 integrateToTuple();
884
885
886
887 /**
888 \brief
889 Return the reciprocal (1/x) of each data point of this Data object.
890 *
891 */
892 ESCRIPT_DLL_API
893 Data
894 oneOver() const;
895 /**
896 \brief
897 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
898 *
899 */
900 ESCRIPT_DLL_API
901 Data
902 wherePositive() const;
903
904 /**
905 \brief
906 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
907 *
908 */
909 ESCRIPT_DLL_API
910 Data
911 whereNegative() const;
912
913 /**
914 \brief
915 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
916 *
917 */
918 ESCRIPT_DLL_API
919 Data
920 whereNonNegative() const;
921
922 /**
923 \brief
924 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
925 *
926 */
927 ESCRIPT_DLL_API
928 Data
929 whereNonPositive() const;
930
931 /**
932 \brief
933 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
934 *
935 */
936 ESCRIPT_DLL_API
937 Data
938 whereZero(double tol=0.0) const;
939
940 /**
941 \brief
942 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
943 *
944 */
945 ESCRIPT_DLL_API
946 Data
947 whereNonZero(double tol=0.0) const;
948
949 /**
950 \brief
951 Return the maximum absolute value of this Data object.
952
953 The method is not const because lazy data needs to be expanded before Lsup can be computed.
954 The _const form can be used when the Data object is const, however this will only work for
955 Data which is not Lazy.
956
957 For Data which contain no samples (or tagged Data for which no tags in use have a value)
958 zero is returned.
959 */
960 ESCRIPT_DLL_API
961 double
962 Lsup();
963
964 ESCRIPT_DLL_API
965 double
966 Lsup_const() const;
967
968
969 /**
970 \brief
971 Return the maximum value of this Data object.
972
973 The method is not const because lazy data needs to be expanded before sup can be computed.
974 The _const form can be used when the Data object is const, however this will only work for
975 Data which is not Lazy.
976
977 For Data which contain no samples (or tagged Data for which no tags in use have a value)
978 a large negative value is returned.
979 */
980 ESCRIPT_DLL_API
981 double
982 sup();
983
984 ESCRIPT_DLL_API
985 double
986 sup_const() const;
987
988
989 /**
990 \brief
991 Return the minimum value of this Data object.
992
993 The method is not const because lazy data needs to be expanded before inf can be computed.
994 The _const form can be used when the Data object is const, however this will only work for
995 Data which is not Lazy.
996
997 For Data which contain no samples (or tagged Data for which no tags in use have a value)
998 a large positive value is returned.
999 */
1000 ESCRIPT_DLL_API
1001 double
1002 inf();
1003
1004 ESCRIPT_DLL_API
1005 double
1006 inf_const() const;
1007
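/*
   Example of the global reductions above (sketch only):

   \code
   double norm  = d.Lsup();            // maximum absolute value over all data points
   double range = d.sup() - d.inf();   // spread of the values
   \endcode
*/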
1008
1009
1010 /**
1011 \brief
1012 Return the absolute value of each data point of this Data object.
1013 *
1014 */
1015 ESCRIPT_DLL_API
1016 Data
1017 abs() const;
1018
1019 /**
1020 \brief
1021 Return the maximum value of each data point of this Data object.
1022 *
1023 */
1024 ESCRIPT_DLL_API
1025 Data
1026 maxval() const;
1027
1028 /**
1029 \brief
1030 Return the minimum value of each data point of this Data object.
1031 *
1032 */
1033 ESCRIPT_DLL_API
1034 Data
1035 minval() const;
1036
1037 /**
1038 \brief
1039 Return the (sample number, data-point number) of the data point with
1040 the minimum component value in this Data object.
1041 \note If you are working in python, please consider using Locator
1042 instead of manually manipulating process and point IDs.
1043 */
1044 ESCRIPT_DLL_API
1045 const boost::python::tuple
1046 minGlobalDataPoint() const;
1047
1048 /**
1049 \brief
1050 Return the (sample number, data-point number) of the data point with
1051 the maximum component value in this Data object.
1052 \note If you are working in python, please consider using Locator
1053 instead of manually manipulating process and point IDs.
1054 */
1055 ESCRIPT_DLL_API
1056 const boost::python::tuple
1057 maxGlobalDataPoint() const;
1058
1059
1060
1061 /**
1062 \brief
1063 Return the sign of each data point of this Data object.
1064 -1 for negative values, zero for zero values, 1 for positive values.
1065 *
1066 */
1067 ESCRIPT_DLL_API
1068 Data
1069 sign() const;
1070
1071 /**
1072 \brief
1073 Return the symmetric part of a matrix which is half the matrix plus its transpose.
1074 *
1075 */
1076 ESCRIPT_DLL_API
1077 Data
1078 symmetric() const;
1079
1080 /**
1081 \brief
1082 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
1083 *
1084 */
1085 ESCRIPT_DLL_API
1086 Data
1087 nonsymmetric() const;
1088
1089 /**
1090 \brief
1091 Return the trace of a matrix
1092 *
1093 */
1094 ESCRIPT_DLL_API
1095 Data
1096 trace(int axis_offset) const;
1097
1098 /**
1099 \brief
1100 Transpose each data point of this Data object around the given axis.
1101 *
1102 */
1103 ESCRIPT_DLL_API
1104 Data
1105 transpose(int axis_offset) const;
1106
1107 /**
1108 \brief
1109 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1110 Currently this function is restricted to rank 2, square shape, and dimension 3.
1111 *
1112 */
1113 ESCRIPT_DLL_API
1114 Data
1115 eigenvalues() const;
1116
1117 /**
1118 \brief
1119 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1120 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1121 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1122 first non-zero entry is positive.
1123 Currently this function is restricted to rank 2, square shape, and dimension 3
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 const boost::python::tuple
1128 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1129
1130 /**
1131 \brief
1132 swaps the components axis0 and axis1
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 swapaxes(const int axis0, const int axis1) const;
1138
1139 /**
1140 \brief
1141 Return the error function erf of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 erf() const;
1147
1148
1149 /**
1150 \brief
1151 For complex values return the conjugate values.
1152 For non-complex data return a copy
1153 */
1154 ESCRIPT_DLL_API
1155 Data
1156 conjugate() const;
1157
1158 /**
1159 \brief
1160 Return the sin of each data point of this Data object.
1161 *
1162 */
1163 ESCRIPT_DLL_API
1164 Data
1165 sin() const;
1166
1167 /**
1168 \brief
1169 Return the cos of each data point of this Data object.
1170 *
1171 */
1172 ESCRIPT_DLL_API
1173 Data
1174 cos() const;
1175
1176 /**
1177 \brief
1178 Bessel worker function.
1179 *
1180 */
1181 ESCRIPT_DLL_API
1182 Data
1183 bessel(int order, double (*besselfunc) (int,double) );
1184
1185
1186 /**
1187 \brief
1188 Return the Bessel function of the first kind for each data point of this Data object.
1189 *
1190 */
1191 ESCRIPT_DLL_API
1192 Data
1193 besselFirstKind(int order);
1194
1195 /**
1196 \brief
1197 Return the Bessel function of the second kind for each data point of this Data object.
1198 *
1199 */
1200 ESCRIPT_DLL_API
1201 Data
1202 besselSecondKind(int order);
1203
1204
1205 /**
1206 \brief
1207 Return the tan of each data point of this Data object.
1208 *
1209 */
1210 ESCRIPT_DLL_API
1211 Data
1212 tan() const;
1213
1214 /**
1215 \brief
1216 Return the asin of each data point of this Data object.
1217 *
1218 */
1219 ESCRIPT_DLL_API
1220 Data
1221 asin() const;
1222
1223 /**
1224 \brief
1225 Return the acos of each data point of this Data object.
1226 *
1227 */
1228 ESCRIPT_DLL_API
1229 Data
1230 acos() const;
1231
1232 /**
1233 \brief
1234 Return the atan of each data point of this Data object.
1235 *
1236 */
1237 ESCRIPT_DLL_API
1238 Data
1239 atan() const;
1240
1241 /**
1242 \brief
1243 Return the sinh of each data point of this Data object.
1244 *
1245 */
1246 ESCRIPT_DLL_API
1247 Data
1248 sinh() const;
1249
1250 /**
1251 \brief
1252 Return the cosh of each data point of this Data object.
1253 *
1254 */
1255 ESCRIPT_DLL_API
1256 Data
1257 cosh() const;
1258
1259 /**
1260 \brief
1261 Return the tanh of each data point of this Data object.
1262 *
1263 */
1264 ESCRIPT_DLL_API
1265 Data
1266 tanh() const;
1267
1268 /**
1269 \brief
1270 Return the asinh of each data point of this Data object.
1271 *
1272 */
1273 ESCRIPT_DLL_API
1274 Data
1275 asinh() const;
1276
1277 /**
1278 \brief
1279 Return the acosh of each data point of this Data object.
1280 *
1281 */
1282 ESCRIPT_DLL_API
1283 Data
1284 acosh() const;
1285
1286 /**
1287 \brief
1288 Return the atanh of each data point of this Data object.
1289 *
1290 */
1291 ESCRIPT_DLL_API
1292 Data
1293 atanh() const;
1294
1295 /**
1296 \brief
1297 Return the log to base 10 of each data point of this Data object.
1298 *
1299 */
1300 ESCRIPT_DLL_API
1301 Data
1302 log10() const;
1303
1304 /**
1305 \brief
1306 Return the natural log of each data point of this Data object.
1307 *
1308 */
1309 ESCRIPT_DLL_API
1310 Data
1311 log() const;
1312
1313 /**
1314 \brief
1315 Return the exponential function of each data point of this Data object.
1316 *
1317 */
1318 ESCRIPT_DLL_API
1319 Data
1320 exp() const;
1321
1322 /**
1323 \brief
1324 Return the square root of each data point of this Data object.
1325 *
1326 */
1327 ESCRIPT_DLL_API
1328 Data
1329 sqrt() const;
1330
1331 /**
1332 \brief
1333 Return the negation of each data point of this Data object.
1334 *
1335 */
1336 ESCRIPT_DLL_API
1337 Data
1338 neg() const;
1339
1340 /**
1341 \brief
1342 Return the identity of each data point of this Data object.
1343 Simply returns this object unmodified.
1344 *
1345 */
1346 ESCRIPT_DLL_API
1347 Data
1348 pos() const;
1349
1350 /**
1351 \brief
1352 Return the given power of each data point of this Data object.
1353
1354 \param right Input - the power to raise the object to.
1355 *
1356 */
1357 ESCRIPT_DLL_API
1358 Data
1359 powD(const Data& right) const;
1360
1361 /**
1362 \brief
1363 Return each data point of this Data object raised to the given power, where the power is supplied as a boost python object.
1364
1365 \param right Input - the power to raise the object to.
1366 *
1367 */
1368 ESCRIPT_DLL_API
1369 Data
1370 powO(const boost::python::object& right) const;
1371
1372 /**
1373 \brief
1374 Return the given boost python object raised to the power of each data point of this Data object.
1375
1376 \param left Input - the bases to be raised to the power of each data point.
1377 *
1378 */
1379
1380 ESCRIPT_DLL_API
1381 Data
1382 rpowO(const boost::python::object& left) const;
1383
1384 /**
1385 \brief
1386 Overloaded operator +=
1387 \param right - Input - The right hand side.
1388 *
1389 */
1390 ESCRIPT_DLL_API
1391 Data& operator+=(const Data& right);
1392 ESCRIPT_DLL_API
1393 Data& operator+=(const boost::python::object& right);
1394
1395 ESCRIPT_DLL_API
1396 Data& operator=(const Data& other);
1397
1398 /**
1399 \brief
1400 Overloaded operator -=
1401 \param right - Input - The right hand side.
1402 *
1403 */
1404 ESCRIPT_DLL_API
1405 Data& operator-=(const Data& right);
1406 ESCRIPT_DLL_API
1407 Data& operator-=(const boost::python::object& right);
1408
1409 /**
1410 \brief
1411 Overloaded operator *=
1412 \param right - Input - The right hand side.
1413 *
1414 */
1415 ESCRIPT_DLL_API
1416 Data& operator*=(const Data& right);
1417 ESCRIPT_DLL_API
1418 Data& operator*=(const boost::python::object& right);
1419
1420 /**
1421 \brief
1422 Overloaded operator /=
1423 \param right - Input - The right hand side.
1424 *
1425 */
1426 ESCRIPT_DLL_API
1427 Data& operator/=(const Data& right);
1428 ESCRIPT_DLL_API
1429 Data& operator/=(const boost::python::object& right);
1430
1431 /**
1432 \brief
1433 Newer style division operator for python
1434 */
1435 ESCRIPT_DLL_API
1436 Data truedivD(const Data& right);
1437
1438 /**
1439 \brief
1440 Newer style division operator for python
1441 */
1442 ESCRIPT_DLL_API
1443 Data truedivO(const boost::python::object& right);
1444
1445 /**
1446 \brief
1447 Newer style division operator for python
1448 */
1449 ESCRIPT_DLL_API
1450 Data rtruedivO(const boost::python::object& left);
1451
1452 /**
1453 \brief
1454 wrapper for python add operation
1455 */
1456 ESCRIPT_DLL_API
1457 boost::python::object __add__(const boost::python::object& right);
1458
1459
1460 /**
1461 \brief
1462 wrapper for python subtract operation
1463 */
1464 ESCRIPT_DLL_API
1465 boost::python::object __sub__(const boost::python::object& right);
1466
1467 /**
1468 \brief
1469 wrapper for python reverse subtract operation
1470 */
1471 ESCRIPT_DLL_API
1472 boost::python::object __rsub__(const boost::python::object& right);
1473
1474 /**
1475 \brief
1476 wrapper for python multiply operation
1477 */
1478 ESCRIPT_DLL_API
1479 boost::python::object __mul__(const boost::python::object& right);
1480
1481 /**
1482 \brief
1483 wrapper for python divide operation
1484 */
1485 ESCRIPT_DLL_API
1486 boost::python::object __div__(const boost::python::object& right);
1487
1488 /**
1489 \brief
1490 wrapper for python reverse divide operation
1491 */
1492 ESCRIPT_DLL_API
1493 boost::python::object __rdiv__(const boost::python::object& right);
1494
1495 /**
1496 \brief return the inverse of matrices.
1497 */
1498 ESCRIPT_DLL_API
1499 Data
1500 matrixInverse() const;
1501
1502 /**
1503 \brief
1504 Returns true if this can be interpolated to functionspace.
1505 */
1506 ESCRIPT_DLL_API
1507 bool
1508 probeInterpolation(const FunctionSpace& functionspace) const;
1509
1510 /**
1511 Data object slicing methods.
1512 */
1513
1514 /**
1515 \brief
1516 Returns a slice from this Data object.
1517
1518 Description:
1519 Implements the [] get operator in python.
1520 Calls getSlice.
1521
1522 \param key - Input - python slice tuple specifying
1523 slice to return.
1524 */
1525 ESCRIPT_DLL_API
1526 Data
1527 getItem(const boost::python::object& key) const;
1528
1529 /**
1530 \brief
1531 Copies slice from value into this Data object.
1532
1533 Implements the [] set operator in python.
1534 Calls setSlice.
1535
1536 \param key - Input - python slice tuple specifying
1537 slice to copy from value.
1538 \param value - Input - Data object to copy from.
1539 */
1540 ESCRIPT_DLL_API
1541 void
1542 setItemD(const boost::python::object& key,
1543 const Data& value);
1544
1545 ESCRIPT_DLL_API
1546 void
1547 setItemO(const boost::python::object& key,
1548 const boost::python::object& value);
1549
1550 // These following public methods should be treated as private.
1551
1552 /**
1553 \brief
1554 Perform the given unary operation on every element of every data point in
1555 this Data object.
1556 */
1557 template <class UnaryFunction>
1558 ESCRIPT_DLL_API
1559 inline
1560 void
1561 unaryOp2(UnaryFunction operation);
1562
1563 /**
1564 \brief
1565 Return a Data object containing the specified slice of
1566 this Data object.
1567 \param region - Input - Region to copy.
1568 *
1569 */
1570 ESCRIPT_DLL_API
1571 Data
1572 getSlice(const DataTypes::RegionType& region) const;
1573
1574 /**
1575 \brief
1576 Copy the specified slice from the given value into this
1577 Data object.
1578 \param value - Input - Data to copy from.
1579 \param region - Input - Region to copy.
1580 *
1581 */
1582 ESCRIPT_DLL_API
1583 void
1584 setSlice(const Data& value,
1585 const DataTypes::RegionType& region);
1586
1587 /**
1588 \brief
1589 print the data values to stdout. Used for debugging
1590 */
1591 ESCRIPT_DLL_API
1592 void
1593 print(void);
1594
1595 /**
1596 \brief
1597 return the MPI rank number of the local data
1598 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1599 is returned
1600 */
1601 ESCRIPT_DLL_API
1602 int
1603 get_MPIRank(void) const;
1604
1605 /**
1606 \brief
1607 return the number of MPI processes in use
1608 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1609 is returned
1610 */
1611 ESCRIPT_DLL_API
1612 int
1613 get_MPISize(void) const;
1614
1615 /**
1616 \brief
1617 return the MPI communicator of the local data
1618 MPI_COMM_WORLD is assumed and returned.
1619 */
1620 ESCRIPT_DLL_API
1621 MPI_Comm
1622 get_MPIComm(void) const;
1623
1624 /**
1625 \brief
1626 return the object produced by the factory, which is a DataConstant or DataExpanded
1627 TODO Ownership of this object should be explained in doco.
1628 */
1629 ESCRIPT_DLL_API
1630 DataAbstract*
1631 borrowData(void) const;
1632
1633 ESCRIPT_DLL_API
1634 DataAbstract_ptr
1635 borrowDataPtr(void) const;
1636
1637 ESCRIPT_DLL_API
1638 DataReady_ptr
1639 borrowReadyPtr(void) const;
1640
1641
1642
1643 /**
1644 \brief
1645 Return a pointer to the beginning of the datapoint at the specified offset.
1646 TODO Eventually these should be inlined.
1647 \param i - position(offset) in the underlying datastructure
1648 */
1649
1650 ESCRIPT_DLL_API
1651 DataTypes::FloatVectorType::const_reference
1652 getDataAtOffsetRO(DataTypes::FloatVectorType::size_type i);
1653
1654
1655 ESCRIPT_DLL_API
1656 DataTypes::FloatVectorType::reference
1657 getDataAtOffsetRW(DataTypes::FloatVectorType::size_type i);
1658
1659 /**
1660 \brief Ensures that the Data is expanded and returns its underlying vector
1661 Does not check for exclusive write, so do that before calling if sharing
1662 is a possibility.
1663 \warning For domain implementors only. Using this function will
1664 avoid using optimisations like lazy evaluation. It is intended
1665 to allow quick initialisation of Data by domain; not as a bypass around
1666 escript's other mechanisms.
1667 */
1668 ESCRIPT_DLL_API
1669 DataTypes::FloatVectorType&
1670 getExpandedVectorReference();
1671
1672
1673 /**
1674 * \brief For tagged Data returns the number of tags with values.
1675 * For non-tagged data will return 0 (even Data which has been expanded from tagged).
1676 */
1677 ESCRIPT_DLL_API
1678 size_t
1679 getNumberOfTaggedValues() const;
1680
1681 protected:
1682
1683 private:
1684
1685 template <class BinaryOp>
1686 double
1687 #ifdef ESYS_MPI
1688 lazyAlgWorker(double init, MPI_Op mpiop_type);
1689 #else
1690 lazyAlgWorker(double init);
1691 #endif
1692
1693 double
1694 LsupWorker() const;
1695
1696 double
1697 supWorker() const;
1698
1699 double
1700 infWorker() const;
1701
1702 boost::python::object
1703 integrateWorker() const;
1704
1705 void
1706 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1707
1708 void
1709 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1710
1711 // For internal use in Data.cpp only!
1712 // other uses should call the main entry points and allow laziness
1713 Data
1714 minval_nonlazy() const;
1715
1716 // For internal use in Data.cpp only!
1717 Data
1718 maxval_nonlazy() const;
1719
1720
1721 /**
1722 \brief
1723 Check *this and the right operand are compatible. Throws
1724 an exception if they aren't.
1725 \param right - Input - The right hand side.
1726 */
1727 inline
1728 void
1729 operandCheck(const Data& right) const
1730 {
1731 return m_data->operandCheck(*(right.m_data.get()));
1732 }
1733
1734 /**
1735 \brief
1736 Perform the specified reduction algorithm on every element of every data point in
1737 this Data object according to the given function and return the single value result.
1738 */
1739 template <class BinaryFunction>
1740 inline
1741 double
1742 algorithm(BinaryFunction operation,
1743 double initial_value) const;
1744
1745 /**
1746 \brief
1747 Reduce each data-point in this Data object using the given operation. Return a Data
1748 object with the same number of data-points, but with each data-point containing only
1749 one value - the result of the reduction operation on the corresponding data-point in
1750 this Data object
1751 */
1752 template <class BinaryFunction>
1753 inline
1754 Data
1755 dp_algorithm(BinaryFunction operation,
1756 double initial_value) const;
1757
1758 /**
1759 \brief
1760 Perform the given binary operation on all of the data's elements.
1761 The underlying type of the right hand side (right) determines the final
1762 type of *this after the operation. For example if the right hand side
1763 is expanded *this will be expanded if necessary.
1764 RHS is a Data object.
1765 */
1766 template <class BinaryFunction>
1767 inline
1768 void
1769 binaryOp(const Data& right,
1770 BinaryFunction operation);
1771
1772 /**
1773 \brief
1774 Convert the data type of the RHS to match this.
1775 \param right - Input - data type to match.
1776 */
1777 void
1778 typeMatchLeft(Data& right) const;
1779
1780 /**
1781 \brief
1782 Convert the data type of this to match the RHS.
1783 \param right - Input - data type to match.
1784 */
1785 void
1786 typeMatchRight(const Data& right);
1787
1788 /**
1789 \brief
1790 Construct a Data object of the appropriate type.
1791 */
1792
1793 void
1794 initialise(const DataTypes::FloatVectorType& value,
1795 const DataTypes::ShapeType& shape,
1796 const FunctionSpace& what,
1797 bool expanded);
1798
1799 void
1800 initialise(const WrappedArray& value,
1801 const FunctionSpace& what,
1802 bool expanded);
1803
1804 void
1805 initialise(const double value,
1806 const DataTypes::ShapeType& shape,
1807 const FunctionSpace& what,
1808 bool expanded);
1809
1810 //
1811 // flag to protect the data object against any update
1812 bool m_protected;
1813 mutable bool m_shared;
1814 bool m_lazy;
1815
1816 //
1817 // pointer to the actual data object
1818 // boost::shared_ptr<DataAbstract> m_data;
1819 DataAbstract_ptr m_data;
1820
1821 // If possible please use getReadyPtr instead.
1822 // But see warning below.
1823 const DataReady*
1824 getReady() const;
1825
1826 DataReady*
1827 getReady();
1828
1829
1830 // Be wary of using this for local operations since it (temporarily) increases reference count.
1831 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1832 // getReady() instead
1833 DataReady_ptr
1834 getReadyPtr();
1835
1836 const_DataReady_ptr
1837 getReadyPtr() const;
1838
1839
1840 /**
1841 \brief Update the Data's shared flag
1842 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1843 For internal use only.
1844 */
1845 void updateShareStatus(bool nowshared) const
1846 {
1847 m_shared=nowshared; // m_shared is mutable
1848 }
1849
1850 // In the isShared() method below:
1851 // A problem would occur if m_data (the address pointed to) were being modified
1852 // while the call m_data->is_shared is being executed.
1853 //
1854 // Q: So why do I think this code can be thread safe/correct?
1855 // A: We need to make some assumptions.
1856 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1857 // 2. We assume that no constructions or assignments which will share previously unshared data
1858 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1859 //
1860 // This means that the only transition we need to consider, is when a previously shared object is
1861 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1862 // In those cases the m_shared flag changes to false after m_data has completed changing.
1863 // For any threads executing before the flag switches they will assume the object is still shared.
1864 bool isShared() const
1865 {
1866 return m_shared;
1867 /* if (m_shared) return true;
1868 if (m_data->isShared())
1869 {
1870 updateShareStatus(true);
1871 return true;
1872 }
1873 return false;*/
1874 }
1875
1876 void forceResolve()
1877 {
1878 if (isLazy())
1879 {
1880 #ifdef _OPENMP
1881 if (omp_in_parallel())
1882 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1883 throw DataException("Please do not call forceResolve() in a parallel region.");
1884 }
1885 #endif
1886 resolve();
1887 }
1888 }
1889
1890 /**
1891 \brief if another object is sharing our member data, make a copy to work with instead.
1892 This code should only be called from single threaded sections of code.
1893 */
1894 void exclusiveWrite()
1895 {
1896 #ifdef _OPENMP
1897 if (omp_in_parallel())
1898 {
1899 // *((int*)0)=17;
1900 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1901 }
1902 #endif
1903 forceResolve();
1904 if (isShared())
1905 {
1906 DataAbstract* t=m_data->deepCopy();
1907 set_m_data(DataAbstract_ptr(t));
1908 }
1909 #ifdef EXWRITECHK
1910 m_data->exclusivewritecalled=true;
1911 #endif
1912 }
1913
1914 /**
1915 \brief checks if caller can have exclusive write to the object
1916 */
1917 void checkExclusiveWrite()
1918 {
1919 if (isLazy() || isShared())
1920 {
1921 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1922 }
1923 }
1924
1925 /**
1926 \brief Modify the data abstract hosted by this Data object
1927 For internal use only.
1928 Passing a pointer to null is permitted (do this in the destructor)
1929 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1930 */
1931 void set_m_data(DataAbstract_ptr p);
1932
1933 friend class DataAbstract; // To allow calls to updateShareStatus
1934 friend class TestDomain; // so its getX will work quickly
1935 #ifdef IKNOWWHATIMDOING
1936 friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1937 #endif
1938 friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1939 friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed, const boost::python::tuple& filter);
1940
1941 };
1942
1943
1944 #ifdef IKNOWWHATIMDOING
1945 ESCRIPT_DLL_API
1946 Data
1947 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1948 #endif
1949
1950
1951 ESCRIPT_DLL_API
1952 Data
1953 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1954
1955
1956
1957 /**
1958 \brief Create a new Expanded Data object filled with pseudo-random data.
1959 */
1960 ESCRIPT_DLL_API
1961 Data randomData(const boost::python::tuple& shape,
1962 const FunctionSpace& what,
1963 long seed, const boost::python::tuple& filter);
1964
1965
1966 } // end namespace escript
1967
1968
1969 // No, this is not supposed to be at the top of the file
1970 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1971 // so that I can dynamic cast between them below.
1972 #include "DataReady.h"
1973 #include "DataLazy.h"
1974
1975 namespace escript
1976 {
1977
1978 inline
1979 const DataReady*
1980 Data::getReady() const
1981 {
1982 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1983 EsysAssert((dr!=0), "Error - casting to DataReady.");
1984 return dr;
1985 }
1986
1987 inline
1988 DataReady*
1989 Data::getReady()
1990 {
1991 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1992 EsysAssert((dr!=0), "Error - casting to DataReady.");
1993 return dr;
1994 }
1995
1996 // Be wary of using this for local operations since it (temporarily) increases reference count.
1997 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1998 // getReady() instead
1999 inline
2000 DataReady_ptr
2001 Data::getReadyPtr()
2002 {
2003 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
2004 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
2005 return dr;
2006 }
2007
2008
2009 inline
2010 const_DataReady_ptr
2011 Data::getReadyPtr() const
2012 {
2013 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
2014 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
2015 return dr;
2016 }
2017
2018 inline
2019 DataAbstract::ValueType::value_type*
2020 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
2021 {
2022 if (isLazy())
2023 {
2024 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
2025 }
2026 #ifdef EXWRITECHK
2027 if (!getReady()->exclusivewritecalled)
2028 {
2029 throw DataException("Error, call to Data::getSampleDataRW without a preceding call to requireWrite/exclusiveWrite.");
2030 }
2031 #endif
2032 return getReady()->getSampleDataRW(sampleNo);
2033 }
2034
2035 inline
2036 const DataAbstract::ValueType::value_type*
2037 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const
2038 {
2039 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
2040 if (l!=0)
2041 {
2042 size_t offset=0;
2043 const DataTypes::FloatVectorType* res=l->resolveSample(sampleNo,offset);
2044 return &((*res)[offset]);
2045 }
2046 return getReady()->getSampleDataRO(sampleNo);
2047 }
2048
2049 inline
2050 const DataAbstract::ValueType::value_type*
2051 Data::getDataRO() const
2052 {
2053 if (isLazy())
2054 {
2055 throw DataException("Programmer error - getDataRO must not be called on Lazy Data.");
2056 }
2057 if (getNumSamples()==0)
2058 {
2059 return 0;
2060 }
2061 else
2062 {
2063 return &(getReady()->getVectorRO()[0]);
2064 }
2065 }
2066
2067
2068 /**
2069 Binary Data object operators.
2070 */
2071 inline double rpow(double x,double y)
2072 {
2073 return pow(y,x);
2074 }
2075
2076 /**
2077 \brief
2078 Operator+
2079 Takes two Data objects.
2080 */
2081 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
2082
2083 /**
2084 \brief
2085 Operator-
2086 Takes two Data objects.
2087 */
2088 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
2089
2090 /**
2091 \brief
2092 Operator*
2093 Takes two Data objects.
2094 */
2095 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
2096
2097 /**
2098 \brief
2099 Operator/
2100 Takes two Data objects.
2101 */
2102 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2103
2104 /**
2105 \brief
2106 Operator+
2107 Takes LHS Data object and RHS python::object.
2108 python::object must be convertible to Data type.
2109 */
2110 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2111
2112 /**
2113 \brief
2114 Operator-
2115 Takes LHS Data object and RHS python::object.
2116 python::object must be convertible to Data type.
2117 */
2118 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2119
2120 /**
2121 \brief
2122 Operator*
2123 Takes LHS Data object and RHS python::object.
2124 python::object must be convertible to Data type.
2125 */
2126 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2127
2128 /**
2129 \brief
2130 Operator/
2131 Takes LHS Data object and RHS python::object.
2132 python::object must be convertible to Data type.
2133 */
2134 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2135
2136 /**
2137 \brief
2138 Operator+
2139 Takes LHS python::object and RHS Data object.
2140 python::object must be convertible to Data type.
2141 */
2142 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2143
2144 /**
2145 \brief
2146 Operator-
2147 Takes LHS python::object and RHS Data object.
2148 python::object must be convertible to Data type.
2149 */
2150 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2151
2152 /**
2153 \brief
2154 Operator*
2155 Takes LHS python::object and RHS Data object.
2156 python::object must be convertible to Data type.
2157 */
2158 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2159
2160 /**
2161 \brief
2162 Operator/
2163 Takes LHS python::object and RHS Data object.
2164 python::object must be convertible to Data type.
2165 */
2166 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2167
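/*
   Illustrative combination of the free operators above (a sketch; 'fs' stands for a
   FunctionSpace obtained elsewhere, and the operands must have compatible FunctionSpaces):

   \code
   Data a(1.0, DataTypes::ShapeType(), fs, true);
   Data b(2.0, DataTypes::ShapeType(), fs, true);
   Data c = (a + b) * a - b / a;   // element-wise arithmetic on Data objects
   \endcode
*/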
2168
2169
2170 /**
2171 \brief
2172 Output operator
2173 */
2174 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2175
2176 /**
2177 \brief
2178 Compute a tensor product of two Data objects
2179 \param arg_0 - Input - Data object
2180 \param arg_1 - Input - Data object
2181 \param axis_offset - Input - axis offset
2182 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
2183 */
2184 ESCRIPT_DLL_API
2185 Data
2186 C_GeneralTensorProduct(Data& arg_0,
2187 Data& arg_1,
2188 int axis_offset=0,
2189 int transpose=0);
2190
2191 /**
2192 \brief
2193 Operator/
2194 Takes RHS Data object.
2195 */
2196 inline
2197 Data
2198 Data::truedivD(const Data& right)
2199 {
2200 return *this / right;
2201 }
2202
2203 /**
2204 \brief
2205 Operator/
2206 Takes RHS python::object.
2207 */
2208 inline
2209 Data
2210 Data::truedivO(const boost::python::object& right)
2211 {
2212 Data tmp(right, getFunctionSpace(), false);
2213 return truedivD(tmp);
2214 }
2215
2216 /**
2217 \brief
2218 Operator/
2219 Takes LHS python::object.
2220 */
2221 inline
2222 Data
2223 Data::rtruedivO(const boost::python::object& left)
2224 {
2225 Data tmp(left, getFunctionSpace(), false);
2226 return tmp.truedivD(*this);
2227 }
2228
2229 /**
2230 \brief
2231 Perform the given binary operation with this and right as operands.
2232 Right is a Data object.
2233 */
2234 template <class BinaryFunction>
2235 inline
2236 void
2237 Data::binaryOp(const Data& right,
2238 BinaryFunction operation)
2239 {
2240 //
2241 // a rank-zero LHS cannot be updated in place with a higher-rank RHS
2242 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2243 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2244 }
2245
2246 if (isLazy() || right.isLazy())
2247 {
2248 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2249 }
2250 //
2251 // initially make the temporary a shallow copy
2252 Data tempRight(right);
2253 FunctionSpace fsl=getFunctionSpace();
2254 FunctionSpace fsr=right.getFunctionSpace();
2255 if (fsl!=fsr) {
2256 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2257 if (intres==0)
2258 {
2259 std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
2260 msg+=fsl.toString();
2261 msg+=" ";
2262 msg+=fsr.toString();
2263 throw DataException(msg.c_str());
2264 }
2265 else if (intres==1)
2266 {
2267 // an interpolation is required so create a new Data
2268 tempRight=Data(right,fsl);
2269 }
2270 else // reverse interpolation preferred
2271 {
2272 // interpolate onto the RHS function space
2273 Data tempLeft(*this,fsr);
2274 set_m_data(tempLeft.m_data);
2275 }
2276 }
2277 operandCheck(tempRight);
2278 //
2279 // ensure this has the right type for the RHS
2280 typeMatchRight(tempRight);
2281 //
2282 // Need to cast to the concrete types so that the correct binaryOp
2283 // is called.
2284 if (isExpanded()) {
2285 //
2286 // Expanded data is processed in parallel; the right-hand side can be
2287 // of any data type
2288 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2289 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2290 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2291 } else if (isTagged()) {
2292 //
2293 // Tagged data is operated on serially; the right-hand side can be
2294 // either DataConstant or DataTagged
2295 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2296 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2297 if (right.isTagged()) {
2298 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2299 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2300 escript::binaryOp(*leftC,*rightC,operation);
2301 } else {
2302 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2303 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2304 escript::binaryOp(*leftC,*rightC,operation);
2305 }
2306 } else if (isConstant()) {
2307 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2308 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2309 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2310 escript::binaryOp(*leftC,*rightC,operation);
2311 }
2312 }
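// Illustration only: binaryOp() applies the functor element-wise and updates
// *this in place. In the library it is driven by the arithmetic operators
// (e.g. +=) rather than called directly; assuming the caller has access to the
// member and <functional> is included, a call would look like:
//
//   lhs.binaryOp(rhs, std::plus<double>());   // lhs := lhs + rhs, element-wise
//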
2313
2314 /**
2315 \brief
2316 Perform the given Data object reduction algorithm on this and return the result.
2317 The given operation combines every element of every data point, so the
2318 argument object (*this) may be of rank n while the result is a single scalar.
2319 Calls escript::algorithm.
2320 */
2321 template <class BinaryFunction>
2322 inline
2323 double
2324 Data::algorithm(BinaryFunction operation, double initial_value) const
2325 {
2326 if (isExpanded()) {
2327 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2328 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2329 return escript::algorithm(*leftC,operation,initial_value);
2330 } else if (isTagged()) {
2331 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2332 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2333 return escript::algorithm(*leftC,operation,initial_value);
2334 } else if (isConstant()) {
2335 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2336 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2337 return escript::algorithm(*leftC,operation,initial_value);
2338 } else if (isEmpty()) {
2339 throw DataException("Error - Operations (algorithm) not permitted on instances of DataEmpty.");
2340 } else if (isLazy()) {
2341 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2342 } else {
2343 throw DataException("Error - Data encapsulates an unknown type.");
2344 }
2345 }
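// Illustration only: algorithm() folds every element of every data point into
// a single double; it typically backs whole-object reductions rather than
// being called directly. A hypothetical sum of all entries (std::plus from
// <functional> assumed available and the member assumed accessible):
//
//   double total = d.algorithm(std::plus<double>(), 0.0);
//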
2346
2347 /**
2348 \brief
2349 Perform the given data point reduction algorithm on data and return the result.
2350 The given operation combines the elements within each data point into a scalar,
2351 so a rank n argument object yields a rank 0 Data object with one value per
2352 data point.
2353 Calls escript::dp_algorithm.
2354 */
2355 template <class BinaryFunction>
2356 inline
2357 Data
2358 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2359 {
2360 if (isEmpty()) {
2361 throw DataException("Error - Operations (dp_algorithm) not permitted on instances of DataEmpty.");
2362 }
2363 else if (isExpanded()) {
2364 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2365 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2366 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2367 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2368 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2369 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2370 return result;
2371 }
2372 else if (isTagged()) {
2373 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2374 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2375 DataTypes::FloatVectorType defval(1);
2376 defval[0]=0;
2377 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2378 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2379 return Data(resultT); // note: the Data object now owns the resultT pointer
2380 }
2381 else if (isConstant()) {
2382 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2383 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2384 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2385 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2386 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2387 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2388 return result;
2389 } else if (isLazy()) {
2390 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2391 } else {
2392 throw DataException("Error - Data encapsulates an unknown type.");
2393 }
2394 }
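// Illustration only: dp_algorithm() folds each data point separately, so a
// rank-n input yields a rank-0 Data object on the same FunctionSpace. A
// hypothetical per-point sum (std::plus from <functional> assumed available
// and the member assumed accessible):
//
//   escript::Data pointSums = d.dp_algorithm(std::plus<double>(), 0.0);
//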
2395
2396 /**
2397 \brief
2398 Compute an element-wise binary tensor operation on two Data objects (a rank 0 argument is broadcast against the other)
2399 \param arg_0 - Input - Data object
2400 \param arg_1 - Input - Data object
2401 \param operation - Input - Binary op functor
2402 */
2403 template <typename BinaryFunction>
2404 inline
2405 Data
2406 C_TensorBinaryOperation(Data const &arg_0,
2407 Data const &arg_1,
2408 BinaryFunction operation)
2409 {
2410 if (arg_0.isEmpty() || arg_1.isEmpty())
2411 {
2412 throw DataException("Error - Operations (C_TensorBinaryOperation) not permitted on instances of DataEmpty.");
2413 }
2414 if (arg_0.isLazy() || arg_1.isLazy())
2415 {
2416 throw DataException("Error - Operations not permitted on lazy data.");
2417 }
2418 // Interpolate if necessary and find an appropriate function space
2419 Data arg_0_Z, arg_1_Z;
2420 FunctionSpace fsl=arg_0.getFunctionSpace();
2421 FunctionSpace fsr=arg_1.getFunctionSpace();
2422 if (fsl!=fsr) {
2423 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2424 if (intres==0)
2425 {
2426 std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
2427 msg+=fsl.toString();
2428 msg+=" ";
2429 msg+=fsr.toString();
2430 throw DataException(msg.c_str());
2431 }
2432 else if (intres==1)
2433 {
2434 arg_1_Z = arg_1.interpolate(arg_0.getFunctionSpace());
2435 arg_0_Z = Data(arg_0);
2436 }
2437 else // reverse interpolation preferred
2438 {
2439 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2440 arg_1_Z = Data(arg_1);
2441 }
2442 } else {
2443 arg_0_Z = Data(arg_0);
2444 arg_1_Z = Data(arg_1);
2445 }
2446 // Get rank and shape of inputs
2447 int rank0 = arg_0_Z.getDataPointRank();
2448 int rank1 = arg_1_Z.getDataPointRank();
2449 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2450 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2451 int size0 = arg_0_Z.getDataPointSize();
2452 int size1 = arg_1_Z.getDataPointSize();
2453 // Declare output Data object
2454 Data res;
2455
2456 if (shape0 == shape1) {
2457 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2458 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2459 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2460 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2461 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2462
2463 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2464 }
2465 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2466
2467 // Prepare the DataConstant input
2468 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2469
2470 // Borrow DataTagged input from Data object
2471 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2472
2473 // Prepare a DataTagged output 2
2474 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2475 res.tag();
2476 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2477
2478 // Prepare offset into DataConstant
2479 int offset_0 = tmp_0->getPointOffset(0,0);
2480 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2481
2482 // Get the pointers to the actual data
2483 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2484 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2485
2486 // Compute a result for the default
2487 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2488 // Compute a result for each tag
2489 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2490 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2491 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2492 tmp_2->addTag(i->first);
2493 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2494 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2495
2496 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2497 }
2498
2499 }
2500 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2501 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2502 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2503 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2504 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2505
2506 int sampleNo_1,dataPointNo_1;
2507 int numSamples_1 = arg_1_Z.getNumSamples();
2508 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2509 int offset_0 = tmp_0->getPointOffset(0,0);
2510 res.requireWrite();
2511 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2512 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2513 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2514 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2515 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2516 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2517 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2518 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2519 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2520 }
2521 }
2522
2523 }
2524 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2525 // Borrow DataTagged input from Data object
2526 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2527
2528 // Prepare the DataConstant input
2529 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2530
2531 // Prepare a DataTagged output 2
2532 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2533 res.tag();
2534 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2535
2536 // Prepare offset into DataConstant
2537 int offset_1 = tmp_1->getPointOffset(0,0);
2538
2539 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2540 // Get the pointers to the actual data
2541 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2542 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2543 // Compute a result for the default
2544 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2545 // Compute a result for each tag
2546 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2547 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2548 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2549 tmp_2->addTag(i->first);
2550 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2551 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2552 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2553 }
2554
2555 }
2556 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2557 // Borrow DataTagged input from Data object
2558 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2559
2560 // Borrow DataTagged input from Data object
2561 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2562
2563 // Prepare a DataTagged output 2
2564 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2565 res.tag(); // DataTagged output
2566 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2567
2568 // Get the pointers to the actual data
2569 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2570 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2571 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2572
2573 // Compute a result for the default
2574 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2575 // Merge the tags
2576 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2577 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2578 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2579 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2580 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2581 }
2582 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2583 tmp_2->addTag(i->first);
2584 }
2585 // Compute a result for each tag
2586 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2587 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2588
2589 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2590 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2591 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2592
2593 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2594 }
2595
2596 }
2597 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2598 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2599 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2600 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2601 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2602 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2603
2604 int sampleNo_0,dataPointNo_0;
2605 int numSamples_0 = arg_0_Z.getNumSamples();
2606 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2607 res.requireWrite();
2608 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2609 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2610 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2611 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2612 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2613 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2614 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2615 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2616 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2617 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2618 }
2619 }
2620
2621 }
2622 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2623 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2624 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2625 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2626 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2627
2628 int sampleNo_0,dataPointNo_0;
2629 int numSamples_0 = arg_0_Z.getNumSamples();
2630 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2631 int offset_1 = tmp_1->getPointOffset(0,0);
2632 res.requireWrite();
2633 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2634 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2635 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2636 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2637 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2638
2639 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2640 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2641 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2642
2643
2644 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2645 }
2646 }
2647
2648 }
2649 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2650 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2651 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2652 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2653 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2654 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2655
2656 int sampleNo_0,dataPointNo_0;
2657 int numSamples_0 = arg_0_Z.getNumSamples();
2658 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2659 res.requireWrite();
2660 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2661 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2662 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2663 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2664 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2665 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2666 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2667 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2668 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2669 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2670 }
2671 }
2672
2673 }
2674 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2675 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2676 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2677 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2678 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2679 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2680
2681 int sampleNo_0,dataPointNo_0;
2682 int numSamples_0 = arg_0_Z.getNumSamples();
2683 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2684 res.requireWrite();
2685 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2686 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2687 dataPointNo_0=0; // the whole sample is processed in one call below, so only offset 0 is needed
2688 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2689 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2690 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2691 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2692 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2693 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2694 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2695 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2696 // }
2697 }
2698
2699 }
2700 else {
2701 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2702 }
2703
2704 } else if (0 == rank0) {
2705 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2706 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2707 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2708 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2709 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2710 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2711 }
2712 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2713
2714 // Prepare the DataConstant input
2715 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2716
2717 // Borrow DataTagged input from Data object
2718 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2719
2720 // Prepare a DataTagged output 2
2721 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2722 res.tag();
2723 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2724
2725 // Prepare offset into DataConstant
2726 int offset_0 = tmp_0->getPointOffset(0,0);
2727 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2728
2729 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2730 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2731
2732 // Compute a result for the default
2733 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2734 // Compute a result for each tag
2735 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2736 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2737 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2738 tmp_2->addTag(i->first);
2739 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2740 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2741 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2742 }
2743
2744 }
2745 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2746
2747 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2748 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2749 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2750 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2751
2752 int sampleNo_1;
2753 int numSamples_1 = arg_1_Z.getNumSamples();
2754 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2755 int offset_0 = tmp_0->getPointOffset(0,0);
2756 const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2757 double ptr_0 = ptr_src[0]; // scalar value of the rank-0 argument (kept as 'ptr_0' for symmetry with the other branches)
2758 int size = size1*numDataPointsPerSample_1;
2759 res.requireWrite();
2760 #pragma omp parallel for private(sampleNo_1) schedule(static)
2761 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2762 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2763 int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2764 int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2765 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2766 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2767 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2768 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2769
2770 // }
2771 }
2772
2773 }
2774 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2775
2776 // Borrow DataTagged input from Data object
2777 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2778
2779 // Prepare the DataConstant input
2780 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2781
2782 // Prepare a DataTagged output 2
2783 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2784 res.tag();
2785 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2786
2787 // Prepare offset into DataConstant
2788 int offset_1 = tmp_1->getPointOffset(0,0);
2789 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2790
2791 // Get the pointers to the actual data
2792 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2793 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2794
2795
2796 // Compute a result for the default
2797 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2798 // Compute a result for each tag
2799 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2800 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2801 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2802 tmp_2->addTag(i->first);
2803 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2804 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2805
2806 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2807 }
2808
2809 }
2810 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2811
2812 // Borrow DataTagged input from Data object
2813 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2814
2815 // Borrow DataTagged input from Data object
2816 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2817
2818 // Prepare a DataTagged output 2
2819 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2820 res.tag(); // DataTagged output
2821 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2822
2823 // Get the pointers to the actual data
2824 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2825 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2826 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2827
2828 // Compute a result for the default
2829 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2830 // Merge the tags
2831 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2832 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2833 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2834 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2835 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2836 }
2837 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2838 tmp_2->addTag(i->first);
2839 }
2840 // Compute a result for each tag
2841 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2842 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2843 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2844 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2845 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2846
2847 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2848 }
2849
2850 }
2851 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2852
2853 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2854 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2855 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2856 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2857 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2858
2859 int sampleNo_0,dataPointNo_0;
2860 int numSamples_0 = arg_0_Z.getNumSamples();
2861 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2862 res.requireWrite();
2863 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2864 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2865 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2866 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2867 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2868 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2869 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2870 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2871 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2872 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2873 }
2874 }
2875
2876 }
2877 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2878 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2879 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2880 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2881 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2882
2883 int sampleNo_0,dataPointNo_0;
2884 int numSamples_0 = arg_0_Z.getNumSamples();
2885 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2886 int offset_1 = tmp_1->getPointOffset(0,0);
2887 res.requireWrite();
2888 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2889 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2890 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2891 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2892 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2893 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2894 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2895 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2896 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2897 }
2898 }
2899
2900
2901 }
2902 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2903
2904 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2905 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2906 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2907 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2908 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2909
2910 int sampleNo_0,dataPointNo_0;
2911 int numSamples_0 = arg_0_Z.getNumSamples();
2912 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2913 res.requireWrite();
2914 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2915 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2916 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2917 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2918 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2919 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2920 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2921 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2922 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2923 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2924 }
2925 }
2926
2927 }
2928 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2929
2930 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2931 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2932 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2933 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2934 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2935
2936 int sampleNo_0,dataPointNo_0;
2937 int numSamples_0 = arg_0_Z.getNumSamples();
2938 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2939 res.requireWrite();
2940 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2941 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2942 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2943 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2944 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2945 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2946 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2947 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2948 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2949 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2950 }
2951 }
2952
2953 }
2954 else {
2955 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2956 }
2957
2958 } else if (0 == rank1) {
2959 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2960 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2961 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2962 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2963 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2964 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2965 }
2966 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2967
2968 // Prepare the DataConstant input
2969 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2970
2971 // Borrow DataTagged input from Data object
2972 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2973
2974 // Prepare a DataTagged output 2
2975 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2976 res.tag();
2977 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2978
2979 // Prepare offset into DataConstant
2980 int offset_0 = tmp_0->getPointOffset(0,0);
2981 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2982
2983 //Get the pointers to the actual data
2984 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2985 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2986
2987 // Compute a result for the default
2988 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2989 // Compute a result for each tag
2990 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2991 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2992 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2993 tmp_2->addTag(i->first);
2994 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2995 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2996 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2997 }
2998 }
2999 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
3000
3001 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3002 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
3003 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3004 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3005
3006 int sampleNo_1,dataPointNo_1;
3007 int numSamples_1 = arg_1_Z.getNumSamples();
3008 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
3009 int offset_0 = tmp_0->getPointOffset(0,0);
3010 res.requireWrite();
3011 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
3012 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
3013 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
3014 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
3015 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
3016 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3017 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3018 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3019 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3020 }
3021 }
3022
3023 }
3024 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
3025
3026 // Borrow DataTagged input from Data object
3027 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3028
3029 // Prepare the DataConstant input
3030 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3031
3032 // Prepare a DataTagged output 2
3033 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3034 res.tag();
3035 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3036
3037 // Prepare offset into DataConstant
3038 int offset_1 = tmp_1->getPointOffset(0,0);
3039 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3040 // Get the pointers to the actual data
3041 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3042 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3043 // Compute a result for the default
3044 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3045 // Compute a result for each tag
3046 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3047 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3048 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3049 tmp_2->addTag(i->first);
3050 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3051 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3052 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3053 }
3054
3055 }
3056 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
3057
3058 // Borrow DataTagged input from Data object
3059 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3060
3061 // Borrow DataTagged input from Data object
3062 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3063
3064 // Prepare a DataTagged output 2
3065 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
3066 res.tag(); // DataTagged output
3067 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3068
3069 // Get the pointers to the actual data
3070 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3071 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
3072 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3073
3074 // Compute a result for the default
3075 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3076 // Merge the tags
3077 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3078 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3079 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
3080 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3081 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
3082 }
3083 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
3084 tmp_2->addTag(i->first);
3085 }
3086 // Compute a result for each tag
3087 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
3088 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
3089 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3090 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
3091 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3092 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3093 }
3094
3095 }
3096 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
3097
3098 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
3099 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3100 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3101 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3102 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3103
3104 int sampleNo_0,dataPointNo_0;
3105 int numSamples_0 = arg_0_Z.getNumSamples();
3106 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3107 res.requireWrite();
3108 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3109 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3110 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
3111 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3112 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3113 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3114 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3115 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3116 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3117 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3118 }
3119 }
3120
3121 }
3122 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
3123 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3124 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3125 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3126 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3127
3128 int sampleNo_0;
3129 int numSamples_0 = arg_0_Z.getNumSamples();
3130 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3131 int offset_1 = tmp_1->getPointOffset(0,0);
3132 const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3133 double ptr_1 = ptr_src[0]; // scalar value of the rank-0 argument (kept as 'ptr_1' for symmetry with the other branches)
3134 int size = size0 * numDataPointsPerSample_0;
3135 res.requireWrite();
3136 #pragma omp parallel for private(sampleNo_0) schedule(static)
3137 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3138 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3139 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3140 int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3141 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3142 // const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3143 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3144 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3145 // }
3146 }
3147
3148
3149 }
3150 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3151
3152 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
3153 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3154 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3155 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3156 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3157
3158 int sampleNo_0,dataPointNo_0;
3159 int numSamples_0 = arg_0_Z.getNumSamples();
3160 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3161 res.requireWrite();
3162 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3163 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3164 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3165 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3166 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3167 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3168 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3169 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3170 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3171 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3172 }
3173 }
3174
3175 }
3176 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3177
3178 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
3179 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3180 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3181 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3182 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3183
3184 int sampleNo_0,dataPointNo_0;
3185 int numSamples_0 = arg_0_Z.getNumSamples();
3186 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3187 res.requireWrite();
3188 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3189 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3190 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3191 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3192 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3193 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3194 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3195 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3196 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3197 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3198 }
3199 }
3200
3201 }
3202 else {
3203 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3204 }
3205
3206 } else {
3207 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3208 }
3209
3210 return res;
3211 }
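// Minimal sketch, assuming 'u' and 'v' are Data objects with identical data
// point shapes (or one of them rank 0) and <functional> is included:
//
//   escript::Data sum = escript::C_TensorBinaryOperation(u, v, std::plus<double>());
//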
3212
3213 template <typename UnaryFunction>
3214 Data
3215 C_TensorUnaryOperation(Data const &arg_0,
3216 UnaryFunction operation)
3217 {
3218 if (arg_0.isEmpty()) // do this before we attempt to interpolate
3219 {
3220 throw DataException("Error - Operations (C_TensorUnaryOperation) not permitted on instances of DataEmpty.");
3221 }
3222 if (arg_0.isLazy())
3223 {
3224 throw DataException("Error - Operations not permitted on lazy data.");
3225 }
3226 // Only one argument, so no interpolation is required; work on a shallow copy
3227 Data arg_0_Z = Data(arg_0);
3228
3229 // Get rank and shape of inputs
3230 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3231 int size0 = arg_0_Z.getDataPointSize();
3232
3233 // Declare output Data object
3234 Data res;
3235
3236 if (arg_0_Z.isConstant()) {
3237 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3238 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3239 double *ptr_2 = &(res.getDataAtOffsetRW(0));
3240 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3241 }
3242 else if (arg_0_Z.isTagged()) {
3243
3244 // Borrow DataTagged input from Data object
3245 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3246
3247 // Prepare a DataTagged output 2
3248 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3249 res.tag();
3250 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3251
3252 // Get the pointers to the actual data
3253 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3254 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3255 // Compute a result for the default
3256 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3257 // Compute a result for each tag
3258 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3259 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3260 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3261 tmp_2->addTag(i->first);
3262 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3263 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3264 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3265 }
3266
3267 }
3268 else if (arg_0_Z.isExpanded()) {
3269
3270 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3271 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3272 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3273
3274 int sampleNo_0,dataPointNo_0;
3275 int numSamples_0 = arg_0_Z.getNumSamples();
3276 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3277 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3278 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3279 dataPointNo_0=0; // the whole sample is processed in one call below, so only offset 0 is needed
3280 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3281 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3282 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3283 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3284 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3285 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3286 // }
3287 }
3288 }
3289 else {
3290 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3291 }
3292
3293 return res;
3294 }
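// Minimal sketch: apply a plain function of one double to every element of an
// assumed Data object 'u' (<cmath> assumed included; callers in this library
// typically pass function pointers of type Data::UnaryDFunPtr):
//
//   escript::Data magnitude = escript::C_TensorUnaryOperation(u, ::fabs);
//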
3295
3296 }
3297 #endif
