/[escript]/trunk/escript/src/Data.h

Contents of /trunk/escript/src/Data.h



Revision 2628
Tue Aug 25 03:50:00 2009 UTC by jfenwick
File MIME type: text/plain
File size: 91880 byte(s)
Fixed linkage on getTestDomainFunctionSpace.

added Data.interpolateTable to perform 2D interpolation.
It needs better unit tests though.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2009 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the function
101 space of inData, an attempt is made to interpolate inData onto "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
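 /**
 \brief Usage sketch (illustrative, not part of the original header): constructing
 Data objects directly from C++ with the constructors declared above. The default
 FunctionSpace() argument is assumed to be acceptable for a stand-alone constant;
 real applications normally pass a function space obtained from a domain.

 \code
 using namespace escript;

 Data empty;                                                         // DataEmpty
 Data scalar(2.5);                                                   // constant scalar
 Data expanded(2.5, DataTypes::ShapeType(), FunctionSpace(), true);  // expanded scalar
 Data shallow(scalar);                                               // shallow copy
 Data deep = scalar.copySelf();                                      // deep copy
 \endcode
 */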
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true, if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from a array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
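 /**
 \brief Example (sketch, not part of the original header): a typical lazy-data
 workflow using delay()/delaySelf() and resolve(). Assumes d is an existing Data
 object; operator+ is declared at namespace scope further below.

 \code
 Data lazy = d.delay() + d;   // build a delayed-evaluation expression
 lazy.resolve();              // force evaluation back to ready data
 d.delaySelf();               // convert d itself to lazy form in place
 d.resolve();                 // and back again
 \endcode
 */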
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
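 /**
 \brief Example (sketch, not part of the original header): obtaining writable
 sample data. requireWrite() is called in a single-threaded section before the
 loop that calls getSampleDataRW() (declared further below). Assumes d is an
 expanded Data object.

 \code
 d.requireWrite();                      // resolve/unshare before writing
 for (int s=0; s<d.getNumSamples(); ++s)
 {
     double* p = d.getSampleDataRW(s);  // pointer to this sample's values
     p[0] = s;                          // modify the first value of the sample
 }
 \endcode
 */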
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for lazy Data.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - A buffer to compute (and store) sample data in will be selected from this group.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
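 /**
 \brief Example (sketch, not part of the original header): reading individual
 data points through getDataOffset() and getDataAtOffsetRO() (declared further
 below). Assumes d is a ready (non-lazy) Data object.

 \code
 for (int s=0; s<d.getNumSamples(); ++s)
 {
     for (int q=0; q<d.getNumDataPointsPerSample(); ++q)
     {
         DataTypes::ValueType::size_type off = d.getDataOffset(s,q);
         double first = d.getDataAtOffsetRO(off);   // first component of point (s,q)
     }
 }
 \endcode
 */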
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687
688
689 /**
690 \brief
691 Assign the given value to the tag associated with name. Implicitly converts this
692 object to type DataTagged. Throws an exception if this object
693 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
694 \param name - Input - name of tag.
695 \param value - Input - Value to associate with given key.
696 */
697 ESCRIPT_DLL_API
698 void
699 setTaggedValueByName(std::string name,
700 const boost::python::object& value);
701
702 /**
703 \brief
704 Assign the given value to the tag. Implicitly converts this
705 object to type DataTagged if it is constant.
706
707 \param tagKey - Input - Integer key.
708 \param value - Input - Value to associate with given key.
709 *
710 */
711 ESCRIPT_DLL_API
712 void
713 setTaggedValue(int tagKey,
714 const boost::python::object& value);
715
716 /**
717 \brief
718 Assign the given value to the tag. Implicitly converts this
719 object to type DataTagged if it is constant.
720
721 \param tagKey - Input - Integer key.
722 \param pointshape - Input - The shape of the value parameter
723 \param value - Input - Value to associate with given key.
724 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
725 */
726 ESCRIPT_DLL_API
727 void
728 setTaggedValueFromCPP(int tagKey,
729 const DataTypes::ShapeType& pointshape,
730 const DataTypes::ValueType& value,
731 int dataOffset=0);
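 /**
 \brief Example (sketch, not part of the original header): assigning a per-tag
 value from C++ with setTaggedValueFromCPP(). The tag key 1 is an arbitrary
 illustration; real tag keys come from the domain's tag map.

 \code
 Data d(0.0);                         // constant scalar
 d.tag();                             // convert to DataTagged
 DataTypes::ShapeType scalarShape;    // empty shape == scalar
 DataTypes::ValueType v(1);
 v[0] = 42.0;
 d.setTaggedValueFromCPP(1, scalarShape, v);  // value 42.0 for tag key 1
 \endcode
 */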
732
733
734
735 /**
736 \brief
737 Copy other Data object into this Data object where mask is positive.
738 */
739 ESCRIPT_DLL_API
740 void
741 copyWithMask(const Data& other,
742 const Data& mask);
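 /**
 \brief Example (sketch, not part of the original header): conditional copy
 using copyWithMask() together with the where*() operations declared below.

 \code
 Data a(0.0), b(5.0);
 Data mask = b.wherePositive();  // 1 where b>0, 0 elsewhere
 a.copyWithMask(b, mask);        // a takes b's values where the mask is positive
 \endcode
 */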
743
744 /**
745 Data object operation methods and operators.
746 */
747
748 /**
749 \brief
750 set all values to zero
751 *
752 */
753 ESCRIPT_DLL_API
754 void
755 setToZero();
756
757 /**
758 \brief
759 Interpolates this onto the given functionspace and returns
760 the result as a Data object.
761 *
762 */
763 ESCRIPT_DLL_API
764 Data
765 interpolate(const FunctionSpace& functionspace) const;
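 /**
 \brief Example (sketch, not part of the original header): interpolating onto
 another function space. Assumes d is a Data object and "target" is a
 FunctionSpace obtained elsewhere (e.g. from a domain factory).

 \code
 if (d.probeInterpolation(target))      // declared further below
 {
     Data e = d.interpolate(target);    // d is unchanged; e lives on "target"
 }
 \endcode
 */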
766
767
768 ESCRIPT_DLL_API
769 Data
770 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
771 double undef, Data& B, double Bmin, double Bstep);
772
773
774 // This signature needs to be tuned
775 ESCRIPT_DLL_API
776 Data
777 interpolateFromTable(boost::python::object table, double Amin, double Astep,
778 double undef, Data& B, double Bmin, double Bstep);
779
780 /**
781 \brief
782 Calculates the gradient of the data at the data points of functionspace.
783 If functionspace is not present the function space of Function(getDomain()) is used.
784 *
785 */
786 ESCRIPT_DLL_API
787 Data
788 gradOn(const FunctionSpace& functionspace) const;
789
790 ESCRIPT_DLL_API
791 Data
792 grad() const;
793
794 /**
795 \brief
796 Calculate the integral over the function space domain as a python tuple.
797 */
798 ESCRIPT_DLL_API
799 boost::python::object
800 integrateToTuple_const() const;
801
802
803 /**
804 \brief
805 Calculate the integral over the function space domain as a python tuple.
806 */
807 ESCRIPT_DLL_API
808 boost::python::object
809 integrateToTuple();
810
811
812
813 /**
814 \brief
815 Return the reciprocal (1./) of each data point of this Data object.
816 *
817 */
818 ESCRIPT_DLL_API
819 Data
820 oneOver() const;
821 /**
822 \brief
823 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
824 *
825 */
826 ESCRIPT_DLL_API
827 Data
828 wherePositive() const;
829
830 /**
831 \brief
832 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
833 *
834 */
835 ESCRIPT_DLL_API
836 Data
837 whereNegative() const;
838
839 /**
840 \brief
841 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
842 *
843 */
844 ESCRIPT_DLL_API
845 Data
846 whereNonNegative() const;
847
848 /**
849 \brief
850 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
851 *
852 */
853 ESCRIPT_DLL_API
854 Data
855 whereNonPositive() const;
856
857 /**
858 \brief
859 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
860 *
861 */
862 ESCRIPT_DLL_API
863 Data
864 whereZero(double tol=0.0) const;
865
866 /**
867 \brief
868 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
869 *
870 */
871 ESCRIPT_DLL_API
872 Data
873 whereNonZero(double tol=0.0) const;
874
875 /**
876 \brief
877 Return the maximum absolute value of this Data object.
878
879 The method is not const because lazy data needs to be expanded before Lsup can be computed.
880 The _const form can be used when the Data object is const, however this will only work for
881 Data which is not Lazy.
882
883 For Data which contain no samples (or tagged Data for which no tags in use have a value)
884 zero is returned.
885 */
886 ESCRIPT_DLL_API
887 double
888 Lsup();
889
890 ESCRIPT_DLL_API
891 double
892 Lsup_const() const;
893
894
895 /**
896 \brief
897 Return the maximum value of this Data object.
898
899 The method is not const because lazy data needs to be expanded before sup can be computed.
900 The _const form can be used when the Data object is const, however this will only work for
901 Data which is not Lazy.
902
903 For Data which contain no samples (or tagged Data for which no tags in use have a value)
904 a large negative value is returned.
905 */
906 ESCRIPT_DLL_API
907 double
908 sup();
909
910 ESCRIPT_DLL_API
911 double
912 sup_const() const;
913
914
915 /**
916 \brief
917 Return the minimum value of this Data object.
918
919 The method is not const because lazy data needs to be expanded before inf can be computed.
920 The _const form can be used when the Data object is const, however this will only work for
921 Data which is not Lazy.
922
923 For Data which contain no samples (or tagged Data for which no tags in use have a value)
924 a large positive value is returned.
925 */
926 ESCRIPT_DLL_API
927 double
928 inf();
929
930 ESCRIPT_DLL_API
931 double
932 inf_const() const;
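 /**
 \brief Example (sketch, not part of the original header): the global
 reductions declared above each return a single double. Assumes d is a Data
 object.

 \code
 double largestAbs = d.Lsup();   // maximum absolute value
 double largest    = d.sup();    // maximum value
 double smallest   = d.inf();    // minimum value
 \endcode
 */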
933
934
935
936 /**
937 \brief
938 Return the absolute value of each data point of this Data object.
939 *
940 */
941 ESCRIPT_DLL_API
942 Data
943 abs() const;
944
945 /**
946 \brief
947 Return the maximum value of each data point of this Data object.
948 *
949 */
950 ESCRIPT_DLL_API
951 Data
952 maxval() const;
953
954 /**
955 \brief
956 Return the minimum value of each data point of this Data object.
957 *
958 */
959 ESCRIPT_DLL_API
960 Data
961 minval() const;
962
963 /**
964 \brief
965 Return the (sample number, data-point number) of the data point with
966 the minimum component value in this Data object.
967 \note If you are working in python, please consider using Locator
968 instead of manually manipulating process and point IDs.
969 */
970 ESCRIPT_DLL_API
971 const boost::python::tuple
972 minGlobalDataPoint() const;
973
974 /**
975 \brief
976 Return the (sample number, data-point number) of the data point with
977 the minimum component value in this Data object.
978 \note If you are working in python, please consider using Locator
979 instead of manually manipulating process and point IDs.
980 */
981 ESCRIPT_DLL_API
982 const boost::python::tuple
983 maxGlobalDataPoint() const;
984
985
986
987 /**
988 \brief
989 Return the sign of each data point of this Data object.
990 -1 for negative values, zero for zero values, 1 for positive values.
991 *
992 */
993 ESCRIPT_DLL_API
994 Data
995 sign() const;
996
997 /**
998 \brief
999 Return the symmetric part of a matrix, i.e. half of the sum of the matrix and its transpose.
1000 *
1001 */
1002 ESCRIPT_DLL_API
1003 Data
1004 symmetric() const;
1005
1006 /**
1007 \brief
1008 Return the nonsymmetric part of a matrix, i.e. half of the difference of the matrix and its transpose.
1009 *
1010 */
1011 ESCRIPT_DLL_API
1012 Data
1013 nonsymmetric() const;
1014
1015 /**
1016 \brief
1017 Return the trace of a matrix
1018 *
1019 */
1020 ESCRIPT_DLL_API
1021 Data
1022 trace(int axis_offset) const;
1023
1024 /**
1025 \brief
1026 Transpose each data point of this Data object around the given axis.
1027 *
1028 */
1029 ESCRIPT_DLL_API
1030 Data
1031 transpose(int axis_offset) const;
1032
1033 /**
1034 \brief
1035 Return the eigenvalues of the symmetric part at each data point of this Data object, in increasing order.
1036 Currently this function is restricted to rank 2, square shape, and dimension 3.
1037 *
1038 */
1039 ESCRIPT_DLL_API
1040 Data
1041 eigenvalues() const;
1042
1043 /**
1044 \brief
1045 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1046 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1047 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1048 first non-zero entry is positive.
1049 Currently this function is restricted to rank 2, square shape, and dimension 3
1050 *
1051 */
1052 ESCRIPT_DLL_API
1053 const boost::python::tuple
1054 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1055
1056 /**
1057 \brief
1058 swaps the components axis0 and axis1
1059 *
1060 */
1061 ESCRIPT_DLL_API
1062 Data
1063 swapaxes(const int axis0, const int axis1) const;
1064
1065 /**
1066 \brief
1067 Return the error function erf of each data point of this Data object.
1068 *
1069 */
1070 ESCRIPT_DLL_API
1071 Data
1072 erf() const;
1073
1074 /**
1075 \brief
1076 Return the sin of each data point of this Data object.
1077 *
1078 */
1079 ESCRIPT_DLL_API
1080 Data
1081 sin() const;
1082
1083 /**
1084 \brief
1085 Return the cos of each data point of this Data object.
1086 *
1087 */
1088 ESCRIPT_DLL_API
1089 Data
1090 cos() const;
1091
1092 /**
1093 \brief
1094 Return the tan of each data point of this Data object.
1095 *
1096 */
1097 ESCRIPT_DLL_API
1098 Data
1099 tan() const;
1100
1101 /**
1102 \brief
1103 Return the asin of each data point of this Data object.
1104 *
1105 */
1106 ESCRIPT_DLL_API
1107 Data
1108 asin() const;
1109
1110 /**
1111 \brief
1112 Return the acos of each data point of this Data object.
1113 *
1114 */
1115 ESCRIPT_DLL_API
1116 Data
1117 acos() const;
1118
1119 /**
1120 \brief
1121 Return the atan of each data point of this Data object.
1122 *
1123 */
1124 ESCRIPT_DLL_API
1125 Data
1126 atan() const;
1127
1128 /**
1129 \brief
1130 Return the sinh of each data point of this Data object.
1131 *
1132 */
1133 ESCRIPT_DLL_API
1134 Data
1135 sinh() const;
1136
1137 /**
1138 \brief
1139 Return the cosh of each data point of this Data object.
1140 *
1141 */
1142 ESCRIPT_DLL_API
1143 Data
1144 cosh() const;
1145
1146 /**
1147 \brief
1148 Return the tanh of each data point of this Data object.
1149 *
1150 */
1151 ESCRIPT_DLL_API
1152 Data
1153 tanh() const;
1154
1155 /**
1156 \brief
1157 Return the asinh of each data point of this Data object.
1158 *
1159 */
1160 ESCRIPT_DLL_API
1161 Data
1162 asinh() const;
1163
1164 /**
1165 \brief
1166 Return the acosh of each data point of this Data object.
1167 *
1168 */
1169 ESCRIPT_DLL_API
1170 Data
1171 acosh() const;
1172
1173 /**
1174 \brief
1175 Return the atanh of each data point of this Data object.
1176 *
1177 */
1178 ESCRIPT_DLL_API
1179 Data
1180 atanh() const;
1181
1182 /**
1183 \brief
1184 Return the log to base 10 of each data point of this Data object.
1185 *
1186 */
1187 ESCRIPT_DLL_API
1188 Data
1189 log10() const;
1190
1191 /**
1192 \brief
1193 Return the natural log of each data point of this Data object.
1194 *
1195 */
1196 ESCRIPT_DLL_API
1197 Data
1198 log() const;
1199
1200 /**
1201 \brief
1202 Return the exponential function of each data point of this Data object.
1203 *
1204 */
1205 ESCRIPT_DLL_API
1206 Data
1207 exp() const;
1208
1209 /**
1210 \brief
1211 Return the square root of each data point of this Data object.
1212 *
1213 */
1214 ESCRIPT_DLL_API
1215 Data
1216 sqrt() const;
1217
1218 /**
1219 \brief
1220 Return the negation of each data point of this Data object.
1221 *
1222 */
1223 ESCRIPT_DLL_API
1224 Data
1225 neg() const;
1226
1227 /**
1228 \brief
1229 Return the identity of each data point of this Data object.
1230 Simply returns this object unmodified.
1231 *
1232 */
1233 ESCRIPT_DLL_API
1234 Data
1235 pos() const;
1236
1237 /**
1238 \brief
1239 Return the given power of each data point of this Data object.
1240
1241 \param right Input - the power to raise the object to.
1242 *
1243 */
1244 ESCRIPT_DLL_API
1245 Data
1246 powD(const Data& right) const;
1247
1248 /**
1249 \brief
1250 Return each data point of this Data object raised to the given power (supplied as a python object).
1251
1252 \param right Input - the power to raise the object to.
1253 *
1254 */
1255 ESCRIPT_DLL_API
1256 Data
1257 powO(const boost::python::object& right) const;
1258
1259 /**
1260 \brief
1261 Return the given bases raised to the power of each data point of this Data object.
1262
1263 \param left Input - the bases
1264 *
1265 */
1266
1267 ESCRIPT_DLL_API
1268 Data
1269 rpowO(const boost::python::object& left) const;
1270
1271 /**
1272 \brief
1273 writes the object to a file in the DX file format
1274 */
1275 ESCRIPT_DLL_API
1276 void
1277 saveDX(std::string fileName) const;
1278
1279 /**
1280 \brief
1281 writes the object to a file in the VTK file format
1282 */
1283 ESCRIPT_DLL_API
1284 void
1285 saveVTK(std::string fileName) const;
1286
1287 /**
1288 \brief
1289 Overloaded operator +=
1290 \param right - Input - The right hand side.
1291 *
1292 */
1293 ESCRIPT_DLL_API
1294 Data& operator+=(const Data& right);
1295 ESCRIPT_DLL_API
1296 Data& operator+=(const boost::python::object& right);
1297
1298 ESCRIPT_DLL_API
1299 Data& operator=(const Data& other);
1300
1301 /**
1302 \brief
1303 Overloaded operator -=
1304 \param right - Input - The right hand side.
1305 *
1306 */
1307 ESCRIPT_DLL_API
1308 Data& operator-=(const Data& right);
1309 ESCRIPT_DLL_API
1310 Data& operator-=(const boost::python::object& right);
1311
1312 /**
1313 \brief
1314 Overloaded operator *=
1315 \param right - Input - The right hand side.
1316 *
1317 */
1318 ESCRIPT_DLL_API
1319 Data& operator*=(const Data& right);
1320 ESCRIPT_DLL_API
1321 Data& operator*=(const boost::python::object& right);
1322
1323 /**
1324 \brief
1325 Overloaded operator /=
1326 \param right - Input - The right hand side.
1327 *
1328 */
1329 ESCRIPT_DLL_API
1330 Data& operator/=(const Data& right);
1331 ESCRIPT_DLL_API
1332 Data& operator/=(const boost::python::object& right);
1333
1334 /**
1335 \brief
1336 Returns true if this can be interpolated to functionspace.
1337 */
1338 ESCRIPT_DLL_API
1339 bool
1340 probeInterpolation(const FunctionSpace& functionspace) const;
1341
1342 /**
1343 Data object slicing methods.
1344 */
1345
1346 /**
1347 \brief
1348 Returns a slice from this Data object.
1349
1350 Description:
1351 Implements the [] get operator in python.
1352 Calls getSlice.
1353
1354 \param key - Input - python slice tuple specifying
1355 slice to return.
1356 */
1357 ESCRIPT_DLL_API
1358 Data
1359 getItem(const boost::python::object& key) const;
1360
1361 /**
1362 \brief
1363 Copies slice from value into this Data object.
1364
1365 Implements the [] set operator in python.
1366 Calls setSlice.
1367
1368 \param key - Input - python slice tuple specifying
1369 slice to copy from value.
1370 \param value - Input - Data object to copy from.
1371 */
1372 ESCRIPT_DLL_API
1373 void
1374 setItemD(const boost::python::object& key,
1375 const Data& value);
1376
1377 ESCRIPT_DLL_API
1378 void
1379 setItemO(const boost::python::object& key,
1380 const boost::python::object& value);
1381
1382 // These following public methods should be treated as private.
1383
1384 /**
1385 \brief
1386 Perform the given unary operation on every element of every data point in
1387 this Data object.
1388 */
1389 template <class UnaryFunction>
1390 ESCRIPT_DLL_API
1391 inline
1392 void
1393 unaryOp2(UnaryFunction operation);
1394
1395 /**
1396 \brief
1397 Return a Data object containing the specified slice of
1398 this Data object.
1399 \param region - Input - Region to copy.
1400 *
1401 */
1402 ESCRIPT_DLL_API
1403 Data
1404 getSlice(const DataTypes::RegionType& region) const;
1405
1406 /**
1407 \brief
1408 Copy the specified slice from the given value into this
1409 Data object.
1410 \param value - Input - Data to copy from.
1411 \param region - Input - Region to copy.
1412 *
1413 */
1414 ESCRIPT_DLL_API
1415 void
1416 setSlice(const Data& value,
1417 const DataTypes::RegionType& region);
1418
1419 /**
1420 \brief
1421 print the data values to stdout. Used for debugging
1422 */
1423 ESCRIPT_DLL_API
1424 void
1425 print(void);
1426
1427 /**
1428 \brief
1429 return the MPI rank number of the local data
1430 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1431 is returned
1432 */
1433 ESCRIPT_DLL_API
1434 int
1435 get_MPIRank(void) const;
1436
1437 /**
1438 \brief
1439 return the number of MPI processes for the local data
1440 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1441 is returned
1442 */
1443 ESCRIPT_DLL_API
1444 int
1445 get_MPISize(void) const;
1446
1447 /**
1448 \brief
1449 return the MPI communicator of the local data
1450 MPI_COMM_WORLD is assumed and returned.
1451 */
1452 ESCRIPT_DLL_API
1453 MPI_Comm
1454 get_MPIComm(void) const;
1455
1456 /**
1457 \brief
1458 return the object produced by the factory, which is a DataConstant or DataExpanded
1459 TODO Ownership of this object should be explained in doco.
1460 */
1461 ESCRIPT_DLL_API
1462 DataAbstract*
1463 borrowData(void) const;
1464
1465 ESCRIPT_DLL_API
1466 DataAbstract_ptr
1467 borrowDataPtr(void) const;
1468
1469 ESCRIPT_DLL_API
1470 DataReady_ptr
1471 borrowReadyPtr(void) const;
1472
1473
1474
1475 /**
1476 \brief
1477 Return a reference to the beginning of the datapoint at the specified offset.
1478 TODO Eventually these should be inlined.
1479 \param i - position(offset) in the underlying datastructure
1480 */
1481
1482 ESCRIPT_DLL_API
1483 DataTypes::ValueType::const_reference
1484 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1485
1486
1487 ESCRIPT_DLL_API
1488 DataTypes::ValueType::reference
1489 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1490
1491
1492
1493 /**
1494 \brief Create a buffer for use by getSampleDataRO
1495 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1496 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1497
1498 In multi-threaded sections, this needs to be called on each thread.
1499
1500 \return A BufferGroup* if Data is lazy, NULL otherwise.
1501 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1502 */
1503 ESCRIPT_DLL_API
1504 BufferGroup*
1505 allocSampleBuffer() const;
1506
1507 /**
1508 \brief Free a buffer allocated with allocSampleBuffer.
1509 \param buffer Input - pointer to the buffer to deallocate.
1510 */
1511 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
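 /**
 \brief Example (sketch, not part of the original header): read-only access to
 possibly-lazy Data using the buffer machinery above. In multi-threaded code
 each thread needs its own buffer. Assumes d is a Data object.

 \code
 BufferGroup* bg = d.allocSampleBuffer();       // NULL if d is not lazy
 for (int s=0; s<d.getNumSamples(); ++s)
 {
     const double* p = d.getSampleDataRO(s, bg);
     // ... read the sample's values through p ...
 }
 if (bg!=0)
 {
     d.freeSampleBuffer(bg);                    // must be freed with freeSampleBuffer
 }
 \endcode
 */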
1512
1513 protected:
1514
1515 private:
1516
1517 double
1518 LsupWorker() const;
1519
1520 double
1521 supWorker() const;
1522
1523 double
1524 infWorker() const;
1525
1526 boost::python::object
1527 integrateWorker() const;
1528
1529 void
1530 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1531
1532 void
1533 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1534
1535
1536 /**
1537 \brief
1538 Check *this and the right operand are compatible. Throws
1539 an exception if they aren't.
1540 \param right - Input - The right hand side.
1541 */
1542 inline
1543 void
1544 operandCheck(const Data& right) const
1545 {
1546 return m_data->operandCheck(*(right.m_data.get()));
1547 }
1548
1549 /**
1550 \brief
1551 Perform the specified reduction algorithm on every element of every data point in
1552 this Data object according to the given function and return the single value result.
1553 */
1554 template <class BinaryFunction>
1555 inline
1556 double
1557 algorithm(BinaryFunction operation,
1558 double initial_value) const;
1559
1560 /**
1561 \brief
1562 Reduce each data-point in this Data object using the given operation. Return a Data
1563 object with the same number of data-points, but with each data-point containing only
1564 one value - the result of the reduction operation on the corresponding data-point in
1565 this Data object
1566 */
1567 template <class BinaryFunction>
1568 inline
1569 Data
1570 dp_algorithm(BinaryFunction operation,
1571 double initial_value) const;
1572
1573 /**
1574 \brief
1575 Perform the given binary operation on all of the data's elements.
1576 The underlying type of the right hand side (right) determines the final
1577 type of *this after the operation. For example if the right hand side
1578 is expanded *this will be expanded if necessary.
1579 RHS is a Data object.
1580 */
1581 template <class BinaryFunction>
1582 inline
1583 void
1584 binaryOp(const Data& right,
1585 BinaryFunction operation);
1586
1587 /**
1588 \brief
1589 Convert the data type of the RHS to match this.
1590 \param right - Input - data type to match.
1591 */
1592 void
1593 typeMatchLeft(Data& right) const;
1594
1595 /**
1596 \brief
1597 Convert the data type of this to match the RHS.
1598 \param right - Input - data type to match.
1599 */
1600 void
1601 typeMatchRight(const Data& right);
1602
1603 /**
1604 \brief
1605 Construct a Data object of the appropriate type.
1606 */
1607
1608 void
1609 initialise(const DataTypes::ValueType& value,
1610 const DataTypes::ShapeType& shape,
1611 const FunctionSpace& what,
1612 bool expanded);
1613
1614 void
1615 initialise(const WrappedArray& value,
1616 const FunctionSpace& what,
1617 bool expanded);
1618
1619 //
1620 // flag to protect the data object against any update
1621 bool m_protected;
1622 mutable bool m_shared;
1623 bool m_lazy;
1624
1625 //
1626 // pointer to the actual data object
1627 // boost::shared_ptr<DataAbstract> m_data;
1628 DataAbstract_ptr m_data;
1629
1630 // If possible please use getReadyPtr instead.
1631 // But see warning below.
1632 const DataReady*
1633 getReady() const;
1634
1635 DataReady*
1636 getReady();
1637
1638
1639 // Be wary of using this for local operations since it (temporarily) increases reference count.
1640 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1641 // getReady() instead
1642 DataReady_ptr
1643 getReadyPtr();
1644
1645 const_DataReady_ptr
1646 getReadyPtr() const;
1647
1648
1649 /**
1650 \brief Update the Data's shared flag
1651 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1652 For internal use only.
1653 */
1654 void updateShareStatus(bool nowshared) const
1655 {
1656 m_shared=nowshared; // m_shared is mutable
1657 }
1658
1659 // In the isShared() method below:
1660 // A problem would occur if m_data (the address pointed to) were being modified
1661 // while the call m_data->is_shared is being executed.
1662 //
1663 // Q: So why do I think this code can be thread safe/correct?
1664 // A: We need to make some assumptions.
1665 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1666 // 2. We assume that no constructions or assignments which will share previously unshared
1667 // data will occur while this call is executing. This is consistent with the way the Data class and the C wrapper are written.
1668 //
1669 // This means that the only transition we need to consider, is when a previously shared object is
1670 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1671 // In those cases the m_shared flag changes to false after m_data has completed changing.
1672 // For any threads executing before the flag switches they will assume the object is still shared.
1673 bool isShared() const
1674 {
1675 return m_shared;
1676 /* if (m_shared) return true;
1677 if (m_data->isShared())
1678 {
1679 updateShareStatus(true);
1680 return true;
1681 }
1682 return false;*/
1683 }
1684
1685 void forceResolve()
1686 {
1687 if (isLazy())
1688 {
1689 #ifdef _OPENMP
1690 if (omp_in_parallel())
1691 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1692 throw DataException("Please do not call forceResolve() in a parallel region.");
1693 }
1694 #endif
1695 resolve();
1696 }
1697 }
1698
1699 /**
1700 \brief If another object is sharing our member data, make a copy to work with instead.
1701 This code should only be called from single threaded sections of code.
1702 */
1703 void exclusiveWrite()
1704 {
1705 #ifdef _OPENMP
1706 if (omp_in_parallel())
1707 {
1708 // *((int*)0)=17;
1709 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1710 }
1711 #endif
1712 forceResolve();
1713 if (isShared())
1714 {
1715 DataAbstract* t=m_data->deepCopy();
1716 set_m_data(DataAbstract_ptr(t));
1717 }
1718 }
1719
1720 /**
1721 \brief checks if caller can have exclusive write to the object
1722 */
1723 void checkExclusiveWrite()
1724 {
1725 if (isLazy() || isShared())
1726 {
1727 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1728 }
1729 }
1730
1731 /**
1732 \brief Modify the data abstract hosted by this Data object
1733 For internal use only.
1734 Passing a pointer to null is permitted (do this in the destructor)
1735 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1736 */
1737 void set_m_data(DataAbstract_ptr p);
1738
1739 friend class DataAbstract; // To allow calls to updateShareStatus
1740
1741 };
1742
1743 } // end namespace escript
1744
1745
1746 // No, this is not supposed to be at the top of the file
1747 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1748 // so that I can dynamic cast between them below.
1749 #include "DataReady.h"
1750 #include "DataLazy.h"
1751
1752 namespace escript
1753 {
1754
1755 inline
1756 const DataReady*
1757 Data::getReady() const
1758 {
1759 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1760 EsysAssert((dr!=0), "Error - casting to DataReady.");
1761 return dr;
1762 }
1763
1764 inline
1765 DataReady*
1766 Data::getReady()
1767 {
1768 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1769 EsysAssert((dr!=0), "Error - casting to DataReady.");
1770 return dr;
1771 }
1772
1773 // Be wary of using this for local operations since it (temporarily) increases reference count.
1774 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1775 // getReady() instead
1776 inline
1777 DataReady_ptr
1778 Data::getReadyPtr()
1779 {
1780 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1781 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1782 return dr;
1783 }
1784
1785
1786 inline
1787 const_DataReady_ptr
1788 Data::getReadyPtr() const
1789 {
1790 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1791 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1792 return dr;
1793 }
1794
1795 inline
1796 DataAbstract::ValueType::value_type*
1797 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1798 {
1799 if (isLazy())
1800 {
1801 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1802 }
1803 return getReady()->getSampleDataRW(sampleNo);
1804 }
1805
1806 inline
1807 const DataAbstract::ValueType::value_type*
1808 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1809 {
1810 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1811 if (l!=0)
1812 {
1813 size_t offset=0;
1814 if (bufferg==NULL)
1815 {
1816 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1817 }
1818 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1819 return &((*res)[offset]);
1820 }
1821 return getReady()->getSampleDataRO(sampleNo);
1822 }
1823
1824
1825
1826 /**
1827 Modify a filename for MPI parallel output to multiple files
1828 */
1829 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1830
1831 /**
1832 Binary Data object operators.
1833 */
1834 inline double rpow(double x,double y)
1835 {
1836 return pow(y,x);
1837 }
1838
1839 /**
1840 \brief
1841 Operator+
1842 Takes two Data objects.
1843 */
1844 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1845
1846 /**
1847 \brief
1848 Operator-
1849 Takes two Data objects.
1850 */
1851 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1852
1853 /**
1854 \brief
1855 Operator*
1856 Takes two Data objects.
1857 */
1858 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1859
1860 /**
1861 \brief
1862 Operator/
1863 Takes two Data objects.
1864 */
1865 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
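 /**
 \brief Example (sketch, not part of the original header): the overloaded
 arithmetic operators combine two Data objects point-wise, interpolating onto a
 common function space where necessary.

 \code
 Data a(2.0), b(3.0);
 Data sum  = a + b;   // 5.0 at every data point
 Data prod = a * b;   // 6.0
 Data quot = a / b;   // 2/3
 a += b;              // in-place update, a becomes 5.0
 \endcode
 */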
1866
1867 /**
1868 \brief
1869 Operator+
1870 Takes LHS Data object and RHS python::object.
1871 python::object must be convertible to Data type.
1872 */
1873 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1874
1875 /**
1876 \brief
1877 Operator-
1878 Takes LHS Data object and RHS python::object.
1879 python::object must be convertible to Data type.
1880 */
1881 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1882
1883 /**
1884 \brief
1885 Operator*
1886 Takes LHS Data object and RHS python::object.
1887 python::object must be convertible to Data type.
1888 */
1889 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1890
1891 /**
1892 \brief
1893 Operator/
1894 Takes LHS Data object and RHS python::object.
1895 python::object must be convertible to Data type.
1896 */
1897 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1898
1899 /**
1900 \brief
1901 Operator+
1902 Takes LHS python::object and RHS Data object.
1903 python::object must be convertible to Data type.
1904 */
1905 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1906
1907 /**
1908 \brief
1909 Operator-
1910 Takes LHS python::object and RHS Data object.
1911 python::object must be convertible to Data type.
1912 */
1913 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1914
1915 /**
1916 \brief
1917 Operator*
1918 Takes LHS python::object and RHS Data object.
1919 python::object must be convertible to Data type.
1920 */
1921 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1922
1923 /**
1924 \brief
1925 Operator/
1926 Takes LHS python::object and RHS Data object.
1927 python::object must be convertible to Data type.
1928 */
1929 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1930
1931
1932
1933 /**
1934 \brief
1935 Output operator
1936 */
1937 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1938
1939 /**
1940 \brief
1941 Compute a tensor product of two Data objects
1942 \param arg_0 - Input - Data object
1943 \param arg_1 - Input - Data object
1944 \param axis_offset - Input - axis offset
1945 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1946 */
1947 ESCRIPT_DLL_API
1948 Data
1949 C_GeneralTensorProduct(Data& arg_0,
1950 Data& arg_1,
1951 int axis_offset=0,
1952 int transpose=0);
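 /**
 \brief Example (sketch, not part of the original header): contracting two Data
 objects over one index with C_GeneralTensorProduct(). The shapes shown are
 purely illustrative; a and b are assumed to be existing Data objects.

 \code
 // a has shape (3,3), b has shape (3,); the result c then has shape (3,)
 Data c = C_GeneralTensorProduct(a, b, 1, 0);   // contract over one axis, no transpose
 \endcode
 */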
1953
1954 /**
1955 \brief
1956 Perform the given binary operation with this and right as operands.
1957 Right is a Data object.
1958 */
1959 template <class BinaryFunction>
1960 inline
1961 void
1962 Data::binaryOp(const Data& right,
1963 BinaryFunction operation)
1964 {
1965 //
1966 // a rank zero object cannot be updated in place with a higher-rank RHS
1967 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1968 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1969 }
1970
1971 if (isLazy() || right.isLazy())
1972 {
1973 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1974 }
1975 //
1976 // initially make the temporary a shallow copy
1977 Data tempRight(right);
1978
1979 if (getFunctionSpace()!=right.getFunctionSpace()) {
1980 if (right.probeInterpolation(getFunctionSpace())) {
1981 //
1982 // an interpolation is required so create a new Data
1983 tempRight=Data(right,this->getFunctionSpace());
1984 } else if (probeInterpolation(right.getFunctionSpace())) {
1985 //
1986 // interpolate onto the RHS function space
1987 Data tempLeft(*this,right.getFunctionSpace());
1988 // m_data=tempLeft.m_data;
1989 set_m_data(tempLeft.m_data);
1990 }
1991 }
1992 operandCheck(tempRight);
1993 //
1994 // ensure this has the right type for the RHS
1995 typeMatchRight(tempRight);
1996 //
1997 // Need to cast to the concrete types so that the correct binaryOp
1998 // is called.
1999 if (isExpanded()) {
2000 //
2001 // Expanded data will be done in parallel, the right hand side can be
2002 // of any data type
2003 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2004 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2005 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2006 } else if (isTagged()) {
2007 //
2008 // Tagged data is operated on serially, the right hand side can be
2009 // either DataConstant or DataTagged
2010 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2011 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2012 if (right.isTagged()) {
2013 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2014 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2015 escript::binaryOp(*leftC,*rightC,operation);
2016 } else {
2017 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2018 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2019 escript::binaryOp(*leftC,*rightC,operation);
2020 }
2021 } else if (isConstant()) {
2022 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2023 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2024 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2025 escript::binaryOp(*leftC,*rightC,operation);
2026 }
2027 }
2028
2029 /**
2030 \brief
2031 Perform the given Data object reduction algorithm on this and return the result.
2032 Given operation combines each element of each data point, thus argument
2033 object (*this) is a rank n Data object, and returned object is a scalar.
2034 Calls escript::algorithm.
2035 */
2036 template <class BinaryFunction>
2037 inline
2038 double
2039 Data::algorithm(BinaryFunction operation, double initial_value) const
2040 {
2041 if (isExpanded()) {
2042 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2043 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2044 return escript::algorithm(*leftC,operation,initial_value);
2045 } else if (isTagged()) {
2046 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2047 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2048 return escript::algorithm(*leftC,operation,initial_value);
2049 } else if (isConstant()) {
2050 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2051 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2052 return escript::algorithm(*leftC,operation,initial_value);
2053 } else if (isEmpty()) {
2054 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2055 } else if (isLazy()) {
2056 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2057 } else {
2058 throw DataException("Error - Data encapsulates an unknown type.");
2059 }
2060 }
2061
2062 /**
2063 \brief
2064 Perform the given data point reduction algorithm on data and return the result.
2065 Given operation combines each element within each data point into a scalar,
2066 thus argument object is a rank n Data object, and returned object is a
2067 rank 0 Data object.
2068 Calls escript::dp_algorithm.
2069 */
2070 template <class BinaryFunction>
2071 inline
2072 Data
2073 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2074 {
2075 if (isEmpty()) {
2076 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2077 }
2078 else if (isExpanded()) {
2079 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2080 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2081 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2082 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2083 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2084 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2085 return result;
2086 }
2087 else if (isTagged()) {
2088 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2089 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2090 DataTypes::ValueType defval(1);
2091 defval[0]=0;
2092 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2093 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2094 return Data(resultT); // note: the Data object now owns the resultT pointer
2095 }
2096 else if (isConstant()) {
2097 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2098 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2099 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2100 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2101 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2102 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2103 return result;
2104 } else if (isLazy()) {
2105 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2106 } else {
2107 throw DataException("Error - Data encapsulates an unknown type.");
2108 }
2109 }
2110
2111 /**
2112 \brief
2113 Compute a tensor operation with two Data objects
2114 \param arg_0 - Input - Data object
2115 \param arg_1 - Input - Data object
2116 \param operation - Input - Binary op functor
2117 */
2118 template <typename BinaryFunction>
2119 inline
2120 Data
2121 C_TensorBinaryOperation(Data const &arg_0,
2122 Data const &arg_1,
2123 BinaryFunction operation)
2124 {
2125 if (arg_0.isEmpty() || arg_1.isEmpty())
2126 {
2127 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2128 }
2129 if (arg_0.isLazy() || arg_1.isLazy())
2130 {
2131 throw DataException("Error - Operations not permitted on lazy data.");
2132 }
2133 // Interpolate if necessary and find an appropriate function space
2134 Data arg_0_Z, arg_1_Z;
2135 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2136 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2137 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2138 arg_1_Z = Data(arg_1);
2139 }
2140 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2141 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2142 arg_0_Z =Data(arg_0);
2143 }
2144 else {
2145 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2146 }
2147 } else {
2148 arg_0_Z = Data(arg_0);
2149 arg_1_Z = Data(arg_1);
2150 }
2151 // Get rank and shape of inputs
2152 int rank0 = arg_0_Z.getDataPointRank();
2153 int rank1 = arg_1_Z.getDataPointRank();
2154 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2155 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2156 int size0 = arg_0_Z.getDataPointSize();
2157 int size1 = arg_1_Z.getDataPointSize();
2158 // Declare output Data object
2159 Data res;
2160
2161 if (shape0 == shape1) {
2162 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2163 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2164 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2165 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2166 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2167
2168 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2169 }
2170 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2171
2172 // Prepare the DataConstant input
2173 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2174
2175 // Borrow DataTagged input from Data object
2176 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2177
2178 // Prepare a DataTagged output 2
2179 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2180 res.tag();
2181 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2182
2183 // Prepare offset into DataConstant
2184 int offset_0 = tmp_0->getPointOffset(0,0);
2185 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2186
2187 // Get the pointers to the actual data
2188 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2189 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2190
2191 // Compute a result for the default
2192 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2193 // Compute a result for each tag
2194 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2195 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2196 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2197 tmp_2->addTag(i->first);
2198 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2199 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2200
2201 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2202 }
2203
2204 }
2205 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2206 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2207 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2208 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2209 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2210
2211 int sampleNo_1,dataPointNo_1;
2212 int numSamples_1 = arg_1_Z.getNumSamples();
2213 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2214 int offset_0 = tmp_0->getPointOffset(0,0);
2215 res.requireWrite();
2216 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2217 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2218 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2219 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2220 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2221 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2222 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2223 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2224 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2225 }
2226 }
2227
2228 }
2229 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2230 // Borrow DataTagged input from Data object
2231 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2232
2233 // Prepare the DataConstant input
2234 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2235
2236       // Prepare a DataTagged output (the result, tmp_2)
2237 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2238 res.tag();
2239 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2240
2241 // Prepare offset into DataConstant
2242 int offset_1 = tmp_1->getPointOffset(0,0);
2243
2244 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2245 // Get the pointers to the actual data
2246 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2247 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2248 // Compute a result for the default
2249 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2250 // Compute a result for each tag
2251 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2252 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2253 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2254 tmp_2->addTag(i->first);
2255 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2256 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2257 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2258 }
2259
2260 }
2261 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2262 // Borrow DataTagged input from Data object
2263 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2264
2265 // Borrow DataTagged input from Data object
2266 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2267
2268       // Prepare a DataTagged output (the result, tmp_2)
2269 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2270 res.tag(); // DataTagged output
2271 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2272
2273 // Get the pointers to the actual data
2274 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2275 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2276 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2277
2278 // Compute a result for the default
2279 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2280 // Merge the tags
2281 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2282 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2283 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2284 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2285 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2286 }
2287 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2288 tmp_2->addTag(i->first);
2289 }
2290 // Compute a result for each tag
2291 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2292 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2293
2294 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2295 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2296 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2297
2298 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2299 }
2300
2301 }
2302 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2303       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2304 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2305 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2306 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2307 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2308
2309 int sampleNo_0,dataPointNo_0;
2310 int numSamples_0 = arg_0_Z.getNumSamples();
2311 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2312 res.requireWrite();
2313 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2314 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2315 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2316 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2317 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2318 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2319 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2320 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2321 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2322 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2323 }
2324 }
2325
2326 }
2327 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2328 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2329 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2330 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2331 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2332
2333 int sampleNo_0,dataPointNo_0;
2334 int numSamples_0 = arg_0_Z.getNumSamples();
2335 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2336 int offset_1 = tmp_1->getPointOffset(0,0);
2337 res.requireWrite();
2338 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2339 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2340 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2341 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2342 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2343
2344 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2345 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2346 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2347
2348
2349 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2350 }
2351 }
2352
2353 }
2354 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2355       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2356 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2357 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2358 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2359 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2360
2361 int sampleNo_0,dataPointNo_0;
2362 int numSamples_0 = arg_0_Z.getNumSamples();
2363 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2364 res.requireWrite();
2365 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2366 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2367 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2368 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2369 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2370 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2371 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2372 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2373 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2374 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2375 }
2376 }
2377
2378 }
2379 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2380       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2381 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2382 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2383 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2384 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2385
2386 int sampleNo_0,dataPointNo_0;
2387 int numSamples_0 = arg_0_Z.getNumSamples();
2388 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2389 res.requireWrite();
2390 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2391 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2392 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2393 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2394 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2395 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2396 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2397 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2398 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2399 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2400 }
2401 }
2402
2403 }
2404 else {
2405 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2406 }
2407
2408 } else if (0 == rank0) {
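            // arg_0 has rank 0: its single value ptr_0[0] is combined with every component of arg_1.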
2409 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2410 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2411 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2412 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2413 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2414 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2415 }
2416 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2417
2418 // Prepare the DataConstant input
2419 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2420
2421 // Borrow DataTagged input from Data object
2422 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2423
2424       // Prepare a DataTagged output (the result, tmp_2)
2425 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2426 res.tag();
2427 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2428
2429 // Prepare offset into DataConstant
2430 int offset_0 = tmp_0->getPointOffset(0,0);
2431 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2432
2433 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2434 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2435
2436 // Compute a result for the default
2437 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2438 // Compute a result for each tag
2439 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2440 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2441 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2442 tmp_2->addTag(i->first);
2443 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2444 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2445 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2446 }
2447
2448 }
2449 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2450
2451 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2452 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2453 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2454 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2455
2456 int sampleNo_1,dataPointNo_1;
2457 int numSamples_1 = arg_1_Z.getNumSamples();
2458 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2459 int offset_0 = tmp_0->getPointOffset(0,0);
2460 res.requireWrite();
2461 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2462 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2463 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2464 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2465 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2466 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2467 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2468 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2469 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2470
2471 }
2472 }
2473
2474 }
2475 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2476
2477 // Borrow DataTagged input from Data object
2478 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2479
2480 // Prepare the DataConstant input
2481 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2482
2483       // Prepare a DataTagged output (the result, tmp_2)
2484 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2485 res.tag();
2486 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2487
2488 // Prepare offset into DataConstant
2489 int offset_1 = tmp_1->getPointOffset(0,0);
2490 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2491
2492 // Get the pointers to the actual data
2493 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2494 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2495
2496
2497 // Compute a result for the default
2498 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2499 // Compute a result for each tag
2500 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2501 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2502 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2503 tmp_2->addTag(i->first);
2504 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2505 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2506
2507 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2508 }
2509
2510 }
2511 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2512
2513 // Borrow DataTagged input from Data object
2514 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2515
2516 // Borrow DataTagged input from Data object
2517 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2518
2519       // Prepare a DataTagged output (the result, tmp_2)
2520 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2521 res.tag(); // DataTagged output
2522 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2523
2524 // Get the pointers to the actual data
2525 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2526 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2527 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2528
2529 // Compute a result for the default
2530 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2531 // Merge the tags
2532 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2533 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2534 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2535 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2536 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2537 }
2538 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2539 tmp_2->addTag(i->first);
2540 }
2541 // Compute a result for each tag
2542 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2543 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2544 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2545 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2546 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2547
2548 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2549 }
2550
2551 }
2552 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2553
2554       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2555 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2556 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2557 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2558 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2559
2560 int sampleNo_0,dataPointNo_0;
2561 int numSamples_0 = arg_0_Z.getNumSamples();
2562 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2563 res.requireWrite();
2564 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2565 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2566 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2567 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2568 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2569 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2570 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2571 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2572 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2573 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2574 }
2575 }
2576
2577 }
2578 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2579 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2580 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2581 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2582 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2583
2584 int sampleNo_0,dataPointNo_0;
2585 int numSamples_0 = arg_0_Z.getNumSamples();
2586 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2587 int offset_1 = tmp_1->getPointOffset(0,0);
2588 res.requireWrite();
2589 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2590 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2591 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2592 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2593 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2594 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2595 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2596 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2597 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2598 }
2599 }
2600
2601
2602 }
2603 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2604
2605       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2606 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2607 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2608 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2609 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2610
2611 int sampleNo_0,dataPointNo_0;
2612 int numSamples_0 = arg_0_Z.getNumSamples();
2613 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2614 res.requireWrite();
2615 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2616 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2617 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2618 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2619 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2620 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2621 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2622 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2623 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2624 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2625 }
2626 }
2627
2628 }
2629 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2630
2631       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2632 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2633 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2634 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2635 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2636
2637 int sampleNo_0,dataPointNo_0;
2638 int numSamples_0 = arg_0_Z.getNumSamples();
2639 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2640 res.requireWrite();
2641 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2642 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2643 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2644 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2645 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2646 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2647 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2648 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2649 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2650 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2651 }
2652 }
2653
2654 }
2655 else {
2656 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2657 }
2658
2659 } else if (0 == rank1) {
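            // arg_1 has rank 0: its single value ptr_1[0] is combined with every component of arg_0.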
2660 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2661 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2662 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2663 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2664 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2665 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2666 }
2667 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2668
2669 // Prepare the DataConstant input
2670 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2671
2672 // Borrow DataTagged input from Data object
2673 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2674
2675       // Prepare a DataTagged output (the result, tmp_2)
2676 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2677 res.tag();
2678 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2679
2680 // Prepare offset into DataConstant
2681 int offset_0 = tmp_0->getPointOffset(0,0);
2682 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2683
2684       // Get the pointers to the actual data
2685 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2686 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2687
2688 // Compute a result for the default
2689 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2690 // Compute a result for each tag
2691 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2692 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2693 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2694 tmp_2->addTag(i->first);
2695 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2696 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2697 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2698 }
2699 }
2700 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2701
2702 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2703 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2704 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2705 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2706
2707 int sampleNo_1,dataPointNo_1;
2708 int numSamples_1 = arg_1_Z.getNumSamples();
2709 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2710 int offset_0 = tmp_0->getPointOffset(0,0);
2711 res.requireWrite();
2712 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2713 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2714 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2715 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2716 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2717 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2718 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2719 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2720 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2721 }
2722 }
2723
2724 }
2725 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2726
2727 // Borrow DataTagged input from Data object
2728 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2729
2730 // Prepare the DataConstant input
2731 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2732
2733       // Prepare a DataTagged output (the result, tmp_2)
2734 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2735 res.tag();
2736 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2737
2738 // Prepare offset into DataConstant
2739 int offset_1 = tmp_1->getPointOffset(0,0);
2740 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2741 // Get the pointers to the actual data
2742 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2743 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2744 // Compute a result for the default
2745 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2746 // Compute a result for each tag
2747 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2748 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2749 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2750 tmp_2->addTag(i->first);
2751 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2752 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2753 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2754 }
2755
2756 }
2757 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2758
2759 // Borrow DataTagged input from Data object
2760 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2761
2762 // Borrow DataTagged input from Data object
2763 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2764
2765       // Prepare a DataTagged output (the result, tmp_2)
2766 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2767 res.tag(); // DataTagged output
2768 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2769
2770 // Get the pointers to the actual data
2771 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2772 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2773 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2774
2775 // Compute a result for the default
2776 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2777 // Merge the tags
2778 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2779 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2780 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2781 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2782 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2783 }
2784 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2785 tmp_2->addTag(i->first);
2786 }
2787 // Compute a result for each tag
2788 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2789 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2790 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2791 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2792 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2793 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2794 }
2795
2796 }
2797 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2798
2799       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2800 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2801 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2802 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2803 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2804
2805 int sampleNo_0,dataPointNo_0;
2806 int numSamples_0 = arg_0_Z.getNumSamples();
2807 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2808 res.requireWrite();
2809 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2810 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2811 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2812 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2813 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2814 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2815 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2816 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2817 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2818 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2819 }
2820 }
2821
2822 }
2823 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2824 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2825 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2826 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2827 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2828
2829 int sampleNo_0,dataPointNo_0;
2830 int numSamples_0 = arg_0_Z.getNumSamples();
2831 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2832 int offset_1 = tmp_1->getPointOffset(0,0);
2833 res.requireWrite();
2834 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2835 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2836 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2837 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2838 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2839 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2840 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2841 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2842 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2843 }
2844 }
2845
2846
2847 }
2848 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2849
2850       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2851 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2852 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2853 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2854 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2855
2856 int sampleNo_0,dataPointNo_0;
2857 int numSamples_0 = arg_0_Z.getNumSamples();
2858 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2859 res.requireWrite();
2860 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2861 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2862 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2863 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2864 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2865 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2866 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2867 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2868 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2869 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2870 }
2871 }
2872
2873 }
2874 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2875
2876       // After finding a common function space above, the two inputs have the same numSamples and the same number of data points per sample
2877 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2878 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2879 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2880 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2881
2882 int sampleNo_0,dataPointNo_0;
2883 int numSamples_0 = arg_0_Z.getNumSamples();
2884 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2885 res.requireWrite();
2886 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2887 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2888 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2889 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2890 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2891 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2892 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2893 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2894 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2895 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2896 }
2897 }
2898
2899 }
2900 else {
2901 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2902 }
2903
2904 } else {
2905 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2906 }
2907
2908 return res;
2909 }
2910
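 /**
    \brief
    Compute an element-wise unary operation on a Data object.
    Throws a DataException if arg_0 is empty or lazy.
    \param arg_0 - Input - Data object
    \param operation - Input - unary functor (or function pointer) applied to every value of every data point

    Example (an illustrative sketch only: "d" stands for some existing non-lazy Data object;
    ::fabs comes from <cmath>):
    \code
    // Take the absolute value of every component of d.
    escript::Data e = escript::C_TensorUnaryOperation(d, ::fabs);
    \endcode
 */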
2911 template <typename UnaryFunction>
2912 Data
2913 C_TensorUnaryOperation(Data const &arg_0,
2914 UnaryFunction operation)
2915 {
2916 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2917 {
2918 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2919 }
2920 if (arg_0.isLazy())
2921 {
2922 throw DataException("Error - Operations not permitted on lazy data.");
2923 }
2924 // Interpolate if necessary and find an appropriate function space
2925 Data arg_0_Z = Data(arg_0);
2926
2927 // Get rank and shape of inputs
2928 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2929 int size0 = arg_0_Z.getDataPointSize();
2930
2931 // Declare output Data object
2932 Data res;
2933
2934 if (arg_0_Z.isConstant()) {
2935 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2936 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2937 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2938 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2939 }
2940 else if (arg_0_Z.isTagged()) {
2941
2942 // Borrow DataTagged input from Data object
2943 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2944
2945 // Prepare a DataTagged output 2
2946 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2947 res.tag();
2948 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2949
2950 // Get the pointers to the actual data
2951 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2952 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2953 // Compute a result for the default
2954 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2955 // Compute a result for each tag
2956 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2957 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2958 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2959 tmp_2->addTag(i->first);
2960 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2961 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2962 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2963 }
2964
2965 }
2966 else if (arg_0_Z.isExpanded()) {
2967
2968 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2969 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2970 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2971
2972 int sampleNo_0,dataPointNo_0;
2973 int numSamples_0 = arg_0_Z.getNumSamples();
2974 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2975 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2976 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2977 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2978 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2979 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2980 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2981 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2982 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2983 }
2984 }
2985 }
2986 else {
2987     throw DataException("Error - C_TensorUnaryOperation: unknown input representation");
2988 }
2989
2990 return res;
2991 }
2992
2993 }
2994 #endif
