Contents of /trunk/escript/src/Data.h

Revision 2646 - Fri Sep 4 00:13:00 2009 UTC - by jfenwick
Unit tests and user doco for interpolation and saveCSV
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2009 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData to "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
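/* Example (illustrative sketch, not part of the original interface; "fs" is an
   assumed FunctionSpace obtained from a domain elsewhere, and ShapeType is
   assumed to behave like a std::vector<int>):

     escript::DataTypes::ShapeType shape;
     shape.push_back(3);                        // each data point is a length-3 vector
     escript::Data c(1.0, shape, fs, false);    // compact constant storage
     escript::Data e(1.0, shape, fs, true);     // expanded: value replicated for every data point
*/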
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Return the data as a string. For large amounts of data, a summary is returned instead.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
379
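/* Example (illustrative sketch of the calling pattern described above; "d" is an
   assumed existing Data object and the loop body is a placeholder):

     d.requireWrite();                          // resolve/unshare in single threaded code
     #pragma omp parallel for
     for (int s = 0; s < d.getNumSamples(); ++s)
     {
         double* p = d.getSampleDataRW(s);      // exclusive write access is now safe
         p[0] += 1.0;                           // placeholder update of the first value
     }
*/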
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine if a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for lazy Data (DataLazy).
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - Input - A buffer in which to compute (and store) sample data will be selected from this group.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687 /**
688 \brief Return true if this object contains no samples.
689 This is not the same as isEmpty()
690 */
691 ESCRIPT_DLL_API
692 bool
693 hasNoSamples() const
694 {
695 return getLength()==0;
696 }
697
698 /**
699 \brief
700 Assign the given value to the tag associated with name. Implicitly converts this
701 object to type DataTagged. Throws an exception if this object
702 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
703 \param name - Input - name of tag.
704 \param value - Input - Value to associate with given key.
705 */
706 ESCRIPT_DLL_API
707 void
708 setTaggedValueByName(std::string name,
709 const boost::python::object& value);
710
711 /**
712 \brief
713 Assign the given value to the tag. Implicitly converts this
714 object to type DataTagged if it is constant.
715
716 \param tagKey - Input - Integer key.
717 \param value - Input - Value to associate with given key.
718 *
719 */
720 ESCRIPT_DLL_API
721 void
722 setTaggedValue(int tagKey,
723 const boost::python::object& value);
724
725 /**
726 \brief
727 Assign the given value to the tag. Implicitly converts this
728 object to type DataTagged if it is constant.
729
730 \param tagKey - Input - Integer key.
731 \param pointshape - Input - The shape of the value parameter
732 \param value - Input - Value to associate with given key.
733 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
734 */
735 ESCRIPT_DLL_API
736 void
737 setTaggedValueFromCPP(int tagKey,
738 const DataTypes::ShapeType& pointshape,
739 const DataTypes::ValueType& value,
740 int dataOffset=0);
741
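/* Example (illustrative sketch; the tag key 1 and the value 42.0 are placeholders,
   and "d" is an assumed Data object whose domain defines that tag):

     d.tag();                                      // convert constant data to tagged data
     escript::DataTypes::ValueType v(1);           // storage for a single (scalar) value
     v[0] = 42.0;
     d.setTaggedValueFromCPP(1, escript::DataTypes::scalarShape, v);
*/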
742
743
744 /**
745 \brief
746 Copy other Data object into this Data object where mask is positive.
747 */
748 ESCRIPT_DLL_API
749 void
750 copyWithMask(const Data& other,
751 const Data& mask);
752
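/* Example (illustrative sketch; "d" and "other" are assumed Data objects on the
   same FunctionSpace):

     escript::Data mask = other.wherePositive();    // 1 where other > 0, 0 elsewhere
     d.copyWithMask(other, mask);                   // overwrite d only where mask is positive
*/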
753 /**
754 Data object operation methods and operators.
755 */
756
757 /**
758 \brief
759 set all values to zero
760 *
761 */
762 ESCRIPT_DLL_API
763 void
764 setToZero();
765
766 /**
767 \brief
768 Interpolates this onto the given functionspace and returns
769 the result as a Data object.
770 *
771 */
772 ESCRIPT_DLL_API
773 Data
774 interpolate(const FunctionSpace& functionspace) const;
775
776
777 ESCRIPT_DLL_API
778 Data
779 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
780 double undef, Data& B, double Bmin, double Bstep);
781
782 ESCRIPT_DLL_API
783 Data
784 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
785 double undef);
786
787
788
789
790 ESCRIPT_DLL_API
791 Data
792 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
793 Data& B, double Bmin, double Bstep, double undef);
794
795 ESCRIPT_DLL_API
796 Data
797 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
798 double undef);
799
800 /**
801 \brief
802 Calculates the gradient of the data at the data points of functionspace.
803 If functionspace is not present the function space of Function(getDomain()) is used.
804 *
805 */
806 ESCRIPT_DLL_API
807 Data
808 gradOn(const FunctionSpace& functionspace) const;
809
810 ESCRIPT_DLL_API
811 Data
812 grad() const;
813
814 /**
815 \brief
816 Calculate the integral over the function space domain as a python tuple.
817 */
818 ESCRIPT_DLL_API
819 boost::python::object
820 integrateToTuple_const() const;
821
822
823 /**
824 \brief
825 Calculate the integral over the function space domain as a python tuple.
826 */
827 ESCRIPT_DLL_API
828 boost::python::object
829 integrateToTuple();
830
831
832
833 /**
834 \brief
835 Return the reciprocal (1./value) of each data point of this Data object.
836 *
837 */
838 ESCRIPT_DLL_API
839 Data
840 oneOver() const;
841 /**
842 \brief
843 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
844 *
845 */
846 ESCRIPT_DLL_API
847 Data
848 wherePositive() const;
849
850 /**
851 \brief
852 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
853 *
854 */
855 ESCRIPT_DLL_API
856 Data
857 whereNegative() const;
858
859 /**
860 \brief
861 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
862 *
863 */
864 ESCRIPT_DLL_API
865 Data
866 whereNonNegative() const;
867
868 /**
869 \brief
870 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
871 *
872 */
873 ESCRIPT_DLL_API
874 Data
875 whereNonPositive() const;
876
877 /**
878 \brief
879 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
880 *
881 */
882 ESCRIPT_DLL_API
883 Data
884 whereZero(double tol=0.0) const;
885
886 /**
887 \brief
888 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
889 *
890 */
891 ESCRIPT_DLL_API
892 Data
893 whereNonZero(double tol=0.0) const;
894
895 /**
896 \brief
897 Return the maximum absolute value of this Data object.
898
899 The method is not const because lazy data needs to be expanded before Lsup can be computed.
900 The _const form can be used when the Data object is const, however this will only work for
901 Data which is not Lazy.
902
903 For Data which contain no samples (or tagged Data for which no tags in use have a value)
904 zero is returned.
905 */
906 ESCRIPT_DLL_API
907 double
908 Lsup();
909
910 ESCRIPT_DLL_API
911 double
912 Lsup_const() const;
913
914
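/* Example (illustrative sketch; "d" is an assumed Data object):

     double m = d.Lsup();                 // may resolve lazy data, hence non-const
     const escript::Data& cd = d;
     double mc = cd.Lsup_const();         // only valid when cd is not lazy
*/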
915 /**
916 \brief
917 Return the maximum value of this Data object.
918
919 The method is not const because lazy data needs to be expanded before sup can be computed.
920 The _const form can be used when the Data object is const, however this will only work for
921 Data which is not Lazy.
922
923 For Data which contain no samples (or tagged Data for which no tags in use have a value)
924 a large negative value is returned.
925 */
926 ESCRIPT_DLL_API
927 double
928 sup();
929
930 ESCRIPT_DLL_API
931 double
932 sup_const() const;
933
934
935 /**
936 \brief
937 Return the minimum value of this Data object.
938
939 The method is not const because lazy data needs to be expanded before inf can be computed.
940 The _const form can be used when the Data object is const, however this will only work for
941 Data which is not Lazy.
942
943 For Data which contain no samples (or tagged Data for which no tags in use have a value)
944 a large positive value is returned.
945 */
946 ESCRIPT_DLL_API
947 double
948 inf();
949
950 ESCRIPT_DLL_API
951 double
952 inf_const() const;
953
954
955
956 /**
957 \brief
958 Return the absolute value of each data point of this Data object.
959 *
960 */
961 ESCRIPT_DLL_API
962 Data
963 abs() const;
964
965 /**
966 \brief
967 Return the maximum value of each data point of this Data object.
968 *
969 */
970 ESCRIPT_DLL_API
971 Data
972 maxval() const;
973
974 /**
975 \brief
976 Return the minimum value of each data point of this Data object.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 minval() const;
982
983 /**
984 \brief
985 Return the (sample number, data-point number) of the data point with
986 the minimum component value in this Data object.
987 \note If you are working in python, please consider using Locator
988 instead of manually manipulating process and point IDs.
989 */
990 ESCRIPT_DLL_API
991 const boost::python::tuple
992 minGlobalDataPoint() const;
993
994 /**
995 \brief
996 Return the (sample number, data-point number) of the data point with
997 the minimum component value in this Data object.
998 \note If you are working in python, please consider using Locator
999 instead of manually manipulating process and point IDs.
1000 */
1001 ESCRIPT_DLL_API
1002 const boost::python::tuple
1003 maxGlobalDataPoint() const;
1004
1005
1006
1007 /**
1008 \brief
1009 Return the sign of each data point of this Data object.
1010 -1 for negative values, zero for zero values, 1 for positive values.
1011 *
1012 */
1013 ESCRIPT_DLL_API
1014 Data
1015 sign() const;
1016
1017 /**
1018 \brief
1019 Return the symmetric part of a matrix, i.e. (the matrix plus its transpose)/2.
1020 *
1021 */
1022 ESCRIPT_DLL_API
1023 Data
1024 symmetric() const;
1025
1026 /**
1027 \brief
1028 Return the nonsymmetric part of a matrix, i.e. (the matrix minus its transpose)/2.
1029 *
1030 */
1031 ESCRIPT_DLL_API
1032 Data
1033 nonsymmetric() const;
1034
1035 /**
1036 \brief
1037 Return the trace of a matrix
1038 *
1039 */
1040 ESCRIPT_DLL_API
1041 Data
1042 trace(int axis_offset) const;
1043
1044 /**
1045 \brief
1046 Transpose each data point of this Data object around the given axis.
1047 *
1048 */
1049 ESCRIPT_DLL_API
1050 Data
1051 transpose(int axis_offset) const;
1052
1053 /**
1054 \brief
1055 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1056 Currently this function is restricted to rank 2, square shape, and dimension 3.
1057 *
1058 */
1059 ESCRIPT_DLL_API
1060 Data
1061 eigenvalues() const;
1062
1063 /**
1064 \brief
1065 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1066 The eigenvalues are ordered by increasing size, where eigenvalues with relative difference less than
1067 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1068 first non-zero entry is positive.
1069 Currently this function is restricted to rank 2, square shape, and dimension 3.
1070 *
1071 */
1072 ESCRIPT_DLL_API
1073 const boost::python::tuple
1074 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1075
1076 /**
1077 \brief
1078 swaps the components axis0 and axis1
1079 *
1080 */
1081 ESCRIPT_DLL_API
1082 Data
1083 swapaxes(const int axis0, const int axis1) const;
1084
1085 /**
1086 \brief
1087 Return the error function erf of each data point of this Data object.
1088 *
1089 */
1090 ESCRIPT_DLL_API
1091 Data
1092 erf() const;
1093
1094 /**
1095 \brief
1096 Return the sin of each data point of this Data object.
1097 *
1098 */
1099 ESCRIPT_DLL_API
1100 Data
1101 sin() const;
1102
1103 /**
1104 \brief
1105 Return the cos of each data point of this Data object.
1106 *
1107 */
1108 ESCRIPT_DLL_API
1109 Data
1110 cos() const;
1111
1112 /**
1113 \brief
1114 Return the tan of each data point of this Data object.
1115 *
1116 */
1117 ESCRIPT_DLL_API
1118 Data
1119 tan() const;
1120
1121 /**
1122 \brief
1123 Return the asin of each data point of this Data object.
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 Data
1128 asin() const;
1129
1130 /**
1131 \brief
1132 Return the acos of each data point of this Data object.
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 acos() const;
1138
1139 /**
1140 \brief
1141 Return the atan of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 atan() const;
1147
1148 /**
1149 \brief
1150 Return the sinh of each data point of this Data object.
1151 *
1152 */
1153 ESCRIPT_DLL_API
1154 Data
1155 sinh() const;
1156
1157 /**
1158 \brief
1159 Return the cosh of each data point of this Data object.
1160 *
1161 */
1162 ESCRIPT_DLL_API
1163 Data
1164 cosh() const;
1165
1166 /**
1167 \brief
1168 Return the tanh of each data point of this Data object.
1169 *
1170 */
1171 ESCRIPT_DLL_API
1172 Data
1173 tanh() const;
1174
1175 /**
1176 \brief
1177 Return the asinh of each data point of this Data object.
1178 *
1179 */
1180 ESCRIPT_DLL_API
1181 Data
1182 asinh() const;
1183
1184 /**
1185 \brief
1186 Return the acosh of each data point of this Data object.
1187 *
1188 */
1189 ESCRIPT_DLL_API
1190 Data
1191 acosh() const;
1192
1193 /**
1194 \brief
1195 Return the atanh of each data point of this Data object.
1196 *
1197 */
1198 ESCRIPT_DLL_API
1199 Data
1200 atanh() const;
1201
1202 /**
1203 \brief
1204 Return the log to base 10 of each data point of this Data object.
1205 *
1206 */
1207 ESCRIPT_DLL_API
1208 Data
1209 log10() const;
1210
1211 /**
1212 \brief
1213 Return the natural log of each data point of this Data object.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 log() const;
1219
1220 /**
1221 \brief
1222 Return the exponential function of each data point of this Data object.
1223 *
1224 */
1225 ESCRIPT_DLL_API
1226 Data
1227 exp() const;
1228
1229 /**
1230 \brief
1231 Return the square root of each data point of this Data object.
1232 *
1233 */
1234 ESCRIPT_DLL_API
1235 Data
1236 sqrt() const;
1237
1238 /**
1239 \brief
1240 Return the negation of each data point of this Data object.
1241 *
1242 */
1243 ESCRIPT_DLL_API
1244 Data
1245 neg() const;
1246
1247 /**
1248 \brief
1249 Return the identity of each data point of this Data object.
1250 Simply returns this object unmodified.
1251 *
1252 */
1253 ESCRIPT_DLL_API
1254 Data
1255 pos() const;
1256
1257 /**
1258 \brief
1259 Return the given power of each data point of this Data object.
1260
1261 \param right Input - the power to raise the object to.
1262 *
1263 */
1264 ESCRIPT_DLL_API
1265 Data
1266 powD(const Data& right) const;
1267
1268 /**
1269 \brief
1270 Return the given power of each data point of this Data object. The power is given as a boost python object.
1271
1272 \param right Input - the power to raise the object to.
1273 *
1274 */
1275 ESCRIPT_DLL_API
1276 Data
1277 powO(const boost::python::object& right) const;
1278
1279 /**
1280 \brief
1281 Return the given boost python object (the base) raised to the power of each data point of this Data object.
1282
1283 \param left Input - the base
1284 *
1285 */
1286
1287 ESCRIPT_DLL_API
1288 Data
1289 rpowO(const boost::python::object& left) const;
1290
1291 /**
1292 \brief
1293 writes the object to a file in the DX file format
1294 */
1295 ESCRIPT_DLL_API
1296 void
1297 saveDX(std::string fileName) const;
1298
1299 /**
1300 \brief
1301 writes the object to a file in the VTK file format
1302 */
1303 ESCRIPT_DLL_API
1304 void
1305 saveVTK(std::string fileName) const;
1306
1307
1308
1309 /**
1310 \brief
1311 Overloaded operator +=
1312 \param right - Input - The right hand side.
1313 *
1314 */
1315 ESCRIPT_DLL_API
1316 Data& operator+=(const Data& right);
1317 ESCRIPT_DLL_API
1318 Data& operator+=(const boost::python::object& right);
1319
1320 ESCRIPT_DLL_API
1321 Data& operator=(const Data& other);
1322
1323 /**
1324 \brief
1325 Overloaded operator -=
1326 \param right - Input - The right hand side.
1327 *
1328 */
1329 ESCRIPT_DLL_API
1330 Data& operator-=(const Data& right);
1331 ESCRIPT_DLL_API
1332 Data& operator-=(const boost::python::object& right);
1333
1334 /**
1335 \brief
1336 Overloaded operator *=
1337 \param right - Input - The right hand side.
1338 *
1339 */
1340 ESCRIPT_DLL_API
1341 Data& operator*=(const Data& right);
1342 ESCRIPT_DLL_API
1343 Data& operator*=(const boost::python::object& right);
1344
1345 /**
1346 \brief
1347 Overloaded operator /=
1348 \param right - Input - The right hand side.
1349 *
1350 */
1351 ESCRIPT_DLL_API
1352 Data& operator/=(const Data& right);
1353 ESCRIPT_DLL_API
1354 Data& operator/=(const boost::python::object& right);
1355
1356 /**
1357 \brief
1358 Returns true if this can be interpolated to functionspace.
1359 */
1360 ESCRIPT_DLL_API
1361 bool
1362 probeInterpolation(const FunctionSpace& functionspace) const;
1363
1364 /**
1365 Data object slicing methods.
1366 */
1367
1368 /**
1369 \brief
1370 Returns a slice from this Data object.
1371
1372 Description:
1373 Implements the [] get operator in python.
1374 Calls getSlice.
1375
1376 \param key - Input - python slice tuple specifying
1377 slice to return.
1378 */
1379 ESCRIPT_DLL_API
1380 Data
1381 getItem(const boost::python::object& key) const;
1382
1383 /**
1384 \brief
1385 Copies slice from value into this Data object.
1386
1387 Implements the [] set operator in python.
1388 Calls setSlice.
1389
1390 \param key - Input - python slice tuple specifying
1391 slice to copy from value.
1392 \param value - Input - Data object to copy from.
1393 */
1394 ESCRIPT_DLL_API
1395 void
1396 setItemD(const boost::python::object& key,
1397 const Data& value);
1398
1399 ESCRIPT_DLL_API
1400 void
1401 setItemO(const boost::python::object& key,
1402 const boost::python::object& value);
1403
1404 // The following public methods should be treated as private.
1405
1406 /**
1407 \brief
1408 Perform the given unary operation on every element of every data point in
1409 this Data object.
1410 */
1411 template <class UnaryFunction>
1412 ESCRIPT_DLL_API
1413 inline
1414 void
1415 unaryOp2(UnaryFunction operation);
1416
1417 /**
1418 \brief
1419 Return a Data object containing the specified slice of
1420 this Data object.
1421 \param region - Input - Region to copy.
1422 *
1423 */
1424 ESCRIPT_DLL_API
1425 Data
1426 getSlice(const DataTypes::RegionType& region) const;
1427
1428 /**
1429 \brief
1430 Copy the specified slice from the given value into this
1431 Data object.
1432 \param value - Input - Data to copy from.
1433 \param region - Input - Region to copy.
1434 *
1435 */
1436 ESCRIPT_DLL_API
1437 void
1438 setSlice(const Data& value,
1439 const DataTypes::RegionType& region);
1440
1441 /**
1442 \brief
1443 print the data values to stdout. Used for debugging
1444 */
1445 ESCRIPT_DLL_API
1446 void
1447 print(void);
1448
1449 /**
1450 \brief
1451 return the MPI rank number of the local data
1452 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1453 is returned
1454 */
1455 ESCRIPT_DLL_API
1456 int
1457 get_MPIRank(void) const;
1458
1459 /**
1460 \brief
1461 return the number of MPI processes for the local data
1462 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1463 is returned
1464 */
1465 ESCRIPT_DLL_API
1466 int
1467 get_MPISize(void) const;
1468
1469 /**
1470 \brief
1471 return the MPI communicator of the local data
1472 MPI_COMM_WORLD is assumed and returned.
1473 */
1474 ESCRIPT_DLL_API
1475 MPI_Comm
1476 get_MPIComm(void) const;
1477
1478 /**
1479 \brief
1480 return the object produced by the factory, which is a DataConstant or DataExpanded
1481 TODO Ownership of this object should be explained in doco.
1482 */
1483 ESCRIPT_DLL_API
1484 DataAbstract*
1485 borrowData(void) const;
1486
1487 ESCRIPT_DLL_API
1488 DataAbstract_ptr
1489 borrowDataPtr(void) const;
1490
1491 ESCRIPT_DLL_API
1492 DataReady_ptr
1493 borrowReadyPtr(void) const;
1494
1495
1496
1497 /**
1498 \brief
1499 Return a reference to the beginning of the datapoint at the specified offset.
1500 TODO Eventually these should be inlined.
1501 \param i - position(offset) in the underlying datastructure
1502 */
1503
1504 ESCRIPT_DLL_API
1505 DataTypes::ValueType::const_reference
1506 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1507
1508
1509 ESCRIPT_DLL_API
1510 DataTypes::ValueType::reference
1511 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1512
1513
1514
1515 /**
1516 \brief Create a buffer for use by getSample
1517 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1518 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1519
1520 In multi-threaded sections, this needs to be called on each thread.
1521
1522 \return A BufferGroup* if Data is lazy, NULL otherwise.
1523 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1524 */
1525 ESCRIPT_DLL_API
1526 BufferGroup*
1527 allocSampleBuffer() const;
1528
1529 /**
1530 \brief Free a buffer allocated with allocSampleBuffer.
1531 \param buffer Input - pointer to the buffer to deallocate.
1532 */
1533 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
1534
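/* Example (illustrative sketch of the per-thread buffer pattern described above;
   "d" is an assumed, possibly lazy, Data object):

     #pragma omp parallel
     {
         escript::BufferGroup* bg = d.allocSampleBuffer();   // one buffer group per thread
         #pragma omp for
         for (int s = 0; s < d.getNumSamples(); ++s)
         {
             const double* p = d.getSampleDataRO(s, bg);     // lazy samples are resolved into bg
             // ... read from p ...
         }
         if (bg != 0)
         {
             d.freeSampleBuffer(bg);                         // must be released with freeSampleBuffer
         }
     }
*/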
1535 protected:
1536
1537 private:
1538
1539 double
1540 LsupWorker() const;
1541
1542 double
1543 supWorker() const;
1544
1545 double
1546 infWorker() const;
1547
1548 boost::python::object
1549 integrateWorker() const;
1550
1551 void
1552 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1553
1554 void
1555 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1556
1557
1558 /**
1559 \brief
1560 Check *this and the right operand are compatible. Throws
1561 an exception if they aren't.
1562 \param right - Input - The right hand side.
1563 */
1564 inline
1565 void
1566 operandCheck(const Data& right) const
1567 {
1568 return m_data->operandCheck(*(right.m_data.get()));
1569 }
1570
1571 /**
1572 \brief
1573 Perform the specified reduction algorithm on every element of every data point in
1574 this Data object according to the given function and return the single value result.
1575 */
1576 template <class BinaryFunction>
1577 inline
1578 double
1579 algorithm(BinaryFunction operation,
1580 double initial_value) const;
1581
1582 /**
1583 \brief
1584 Reduce each data-point in this Data object using the given operation. Return a Data
1585 object with the same number of data-points, but with each data-point containing only
1586 one value - the result of the reduction operation on the corresponding data-point in
1587 this Data object
1588 */
1589 template <class BinaryFunction>
1590 inline
1591 Data
1592 dp_algorithm(BinaryFunction operation,
1593 double initial_value) const;
1594
1595 /**
1596 \brief
1597 Perform the given binary operation on all of the data's elements.
1598 The underlying type of the right hand side (right) determines the final
1599 type of *this after the operation. For example if the right hand side
1600 is expanded *this will be expanded if necessary.
1601 RHS is a Data object.
1602 */
1603 template <class BinaryFunction>
1604 inline
1605 void
1606 binaryOp(const Data& right,
1607 BinaryFunction operation);
1608
1609 /**
1610 \brief
1611 Convert the data type of the RHS to match this.
1612 \param right - Input - data type to match.
1613 */
1614 void
1615 typeMatchLeft(Data& right) const;
1616
1617 /**
1618 \brief
1619 Convert the data type of this to match the RHS.
1620 \param right - Input - data type to match.
1621 */
1622 void
1623 typeMatchRight(const Data& right);
1624
1625 /**
1626 \brief
1627 Construct a Data object of the appropriate type.
1628 */
1629
1630 void
1631 initialise(const DataTypes::ValueType& value,
1632 const DataTypes::ShapeType& shape,
1633 const FunctionSpace& what,
1634 bool expanded);
1635
1636 void
1637 initialise(const WrappedArray& value,
1638 const FunctionSpace& what,
1639 bool expanded);
1640
1641 //
1642 // flag to protect the data object against any update
1643 bool m_protected;
1644 mutable bool m_shared;
1645 bool m_lazy;
1646
1647 //
1648 // pointer to the actual data object
1649 // boost::shared_ptr<DataAbstract> m_data;
1650 DataAbstract_ptr m_data;
1651
1652 // If possible please use getReadyPtr instead.
1653 // But see warning below.
1654 const DataReady*
1655 getReady() const;
1656
1657 DataReady*
1658 getReady();
1659
1660
1661 // Be wary of using this for local operations since it (temporarily) increases reference count.
1662 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1663 // getReady() instead
1664 DataReady_ptr
1665 getReadyPtr();
1666
1667 const_DataReady_ptr
1668 getReadyPtr() const;
1669
1670
1671 /**
1672 \brief Update the Data's shared flag
1673 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1674 For internal use only.
1675 */
1676 void updateShareStatus(bool nowshared) const
1677 {
1678 m_shared=nowshared; // m_shared is mutable
1679 }
1680
1681 // In the isShared() method below:
1682 // A problem would occur if m_data (the address pointed to) were being modified
1683 // while the call m_data->is_shared is being executed.
1684 //
1685 // Q: So why do I think this code can be thread safe/correct?
1686 // A: We need to make some assumptions.
1687 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1688 // 2. We assume that no constructions or assignments which will share previously unshared data
1689 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1690 //
1691 // This means that the only transition we need to consider, is when a previously shared object is
1692 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1693 // In those cases the m_shared flag changes to false after m_data has completed changing.
1694 // For any threads executing before the flag switches they will assume the object is still shared.
1695 bool isShared() const
1696 {
1697 return m_shared;
1698 /* if (m_shared) return true;
1699 if (m_data->isShared())
1700 {
1701 updateShareStatus(true);
1702 return true;
1703 }
1704 return false;*/
1705 }
1706
1707 void forceResolve()
1708 {
1709 if (isLazy())
1710 {
1711 #ifdef _OPENMP
1712 if (omp_in_parallel())
1713 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1714 throw DataException("Please do not call forceResolve() in a parallel region.");
1715 }
1716 #endif
1717 resolve();
1718 }
1719 }
1720
1721 /**
1722 \brief if another object is sharing our member data, make a copy to work with instead.
1723 This code should only be called from single threaded sections of code.
1724 */
1725 void exclusiveWrite()
1726 {
1727 #ifdef _OPENMP
1728 if (omp_in_parallel())
1729 {
1730 // *((int*)0)=17;
1731 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1732 }
1733 #endif
1734 forceResolve();
1735 if (isShared())
1736 {
1737 DataAbstract* t=m_data->deepCopy();
1738 set_m_data(DataAbstract_ptr(t));
1739 }
1740 }
1741
1742 /**
1743 \brief checks if caller can have exclusive write to the object
1744 */
1745 void checkExclusiveWrite()
1746 {
1747 if (isLazy() || isShared())
1748 {
1749 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1750 }
1751 }
1752
1753 /**
1754 \brief Modify the data abstract hosted by this Data object
1755 For internal use only.
1756 Passing a pointer to null is permitted (do this in the destructor)
1757 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1758 */
1759 void set_m_data(DataAbstract_ptr p);
1760
1761 friend class DataAbstract; // To allow calls to updateShareStatus
1762
1763 };
1764
1765 } // end namespace escript
1766
1767
1768 // No, this is not supposed to be at the top of the file
1769 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1770 // so that I can dynamic cast between them below.
1771 #include "DataReady.h"
1772 #include "DataLazy.h"
1773
1774 namespace escript
1775 {
1776
1777 inline
1778 const DataReady*
1779 Data::getReady() const
1780 {
1781 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1782 EsysAssert((dr!=0), "Error - casting to DataReady.");
1783 return dr;
1784 }
1785
1786 inline
1787 DataReady*
1788 Data::getReady()
1789 {
1790 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1791 EsysAssert((dr!=0), "Error - casting to DataReady.");
1792 return dr;
1793 }
1794
1795 // Be wary of using this for local operations since it (temporarily) increases reference count.
1796 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1797 // getReady() instead
1798 inline
1799 DataReady_ptr
1800 Data::getReadyPtr()
1801 {
1802 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1803 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1804 return dr;
1805 }
1806
1807
1808 inline
1809 const_DataReady_ptr
1810 Data::getReadyPtr() const
1811 {
1812 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1813 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1814 return dr;
1815 }
1816
1817 inline
1818 DataAbstract::ValueType::value_type*
1819 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1820 {
1821 if (isLazy())
1822 {
1823 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1824 }
1825 return getReady()->getSampleDataRW(sampleNo);
1826 }
1827
1828 inline
1829 const DataAbstract::ValueType::value_type*
1830 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1831 {
1832 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1833 if (l!=0)
1834 {
1835 size_t offset=0;
1836 if (bufferg==NULL)
1837 {
1838 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1839 }
1840 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1841 return &((*res)[offset]);
1842 }
1843 return getReady()->getSampleDataRO(sampleNo);
1844 }
1845
1846
1847
1848 /**
1849 Modify a filename for MPI parallel output to multiple files
1850 */
1851 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1852
1853 /**
1854 Binary Data object operators.
1855 */
1856 inline double rpow(double x,double y)
1857 {
1858 return pow(y,x);
1859 }
1860
1861 /**
1862 \brief
1863 Operator+
1864 Takes two Data objects.
1865 */
1866 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1867
1868 /**
1869 \brief
1870 Operator-
1871 Takes two Data objects.
1872 */
1873 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1874
1875 /**
1876 \brief
1877 Operator*
1878 Takes two Data objects.
1879 */
1880 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1881
1882 /**
1883 \brief
1884 Operator/
1885 Takes two Data objects.
1886 */
1887 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1888
1889 /**
1890 \brief
1891 Operator+
1892 Takes LHS Data object and RHS python::object.
1893 python::object must be convertible to Data type.
1894 */
1895 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1896
1897 /**
1898 \brief
1899 Operator-
1900 Takes LHS Data object and RHS python::object.
1901 python::object must be convertible to Data type.
1902 */
1903 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1904
1905 /**
1906 \brief
1907 Operator*
1908 Takes LHS Data object and RHS python::object.
1909 python::object must be convertible to Data type.
1910 */
1911 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1912
1913 /**
1914 \brief
1915 Operator/
1916 Takes LHS Data object and RHS python::object.
1917 python::object must be convertible to Data type.
1918 */
1919 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1920
1921 /**
1922 \brief
1923 Operator+
1924 Takes LHS python::object and RHS Data object.
1925 python::object must be convertible to Data type.
1926 */
1927 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1928
1929 /**
1930 \brief
1931 Operator-
1932 Takes LHS python::object and RHS Data object.
1933 python::object must be convertible to Data type.
1934 */
1935 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1936
1937 /**
1938 \brief
1939 Operator*
1940 Takes LHS python::object and RHS Data object.
1941 python::object must be convertible to Data type.
1942 */
1943 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1944
1945 /**
1946 \brief
1947 Operator/
1948 Takes LHS python::object and RHS Data object.
1949 python::object must be convertible to Data type.
1950 */
1951 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1952
1953
1954
1955 /**
1956 \brief
1957 Output operator
1958 */
1959 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1960
1961 /**
1962 \brief
1963 Compute a tensor product of two Data objects
1964 \param arg_0 - Input - Data object
1965 \param arg_1 - Input - Data object
1966 \param axis_offset - Input - axis offset
1967 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1968 */
1969 ESCRIPT_DLL_API
1970 Data
1971 C_GeneralTensorProduct(Data& arg_0,
1972 Data& arg_1,
1973 int axis_offset=0,
1974 int transpose=0);
1975
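/* Example (illustrative sketch; this reads axis_offset as the number of indices
   contracted, so axis_offset=1 on two rank-2 objects corresponds to a
   matrix-matrix product; "A" and "B" are assumed compatible Data objects):

     escript::Data C = escript::C_GeneralTensorProduct(A, B, 1, 0);
*/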
1976 /**
1977 \brief
1978 Perform the given binary operation with this and right as operands.
1979 Right is a Data object.
1980 */
1981 template <class BinaryFunction>
1982 inline
1983 void
1984 Data::binaryOp(const Data& right,
1985 BinaryFunction operation)
1986 {
1987 //
1988 // updating a rank-zero object with a higher-rank object is not supported
1989 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1990 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1991 }
1992
1993 if (isLazy() || right.isLazy())
1994 {
1995 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1996 }
1997 //
1998 // initially make the temporary a shallow copy
1999 Data tempRight(right);
2000
2001 if (getFunctionSpace()!=right.getFunctionSpace()) {
2002 if (right.probeInterpolation(getFunctionSpace())) {
2003 //
2004 // an interpolation is required so create a new Data
2005 tempRight=Data(right,this->getFunctionSpace());
2006 } else if (probeInterpolation(right.getFunctionSpace())) {
2007 //
2008 // interpolate onto the RHS function space
2009 Data tempLeft(*this,right.getFunctionSpace());
2010 // m_data=tempLeft.m_data;
2011 set_m_data(tempLeft.m_data);
2012 }
2013 }
2014 operandCheck(tempRight);
2015 //
2016 // ensure this has the right type for the RHS
2017 typeMatchRight(tempRight);
2018 //
2019 // Need to cast to the concrete types so that the correct binaryOp
2020 // is called.
2021 if (isExpanded()) {
2022 //
2023 // Expanded data will be done in parallel, the right hand side can be
2024 // of any data type
2025 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2026 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2027 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2028 } else if (isTagged()) {
2029 //
2030 // Tagged data is operated on serially, the right hand side can be
2031 // either DataConstant or DataTagged
2032 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2033 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2034 if (right.isTagged()) {
2035 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2036 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2037 escript::binaryOp(*leftC,*rightC,operation);
2038 } else {
2039 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2040 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2041 escript::binaryOp(*leftC,*rightC,operation);
2042 }
2043 } else if (isConstant()) {
2044 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2045 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2046 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2047 escript::binaryOp(*leftC,*rightC,operation);
2048 }
2049 }
2050
2051 /**
2052 \brief
2053 Perform the given Data object reduction algorithm on this and return the result.
2054 Given operation combines each element of each data point, thus argument
2055 object (*this) is a rank n Data object, and returned object is a scalar.
2056 Calls escript::algorithm.
2057 */
2058 template <class BinaryFunction>
2059 inline
2060 double
2061 Data::algorithm(BinaryFunction operation, double initial_value) const
2062 {
2063 if (isExpanded()) {
2064 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2065 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2066 return escript::algorithm(*leftC,operation,initial_value);
2067 } else if (isTagged()) {
2068 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2069 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2070 return escript::algorithm(*leftC,operation,initial_value);
2071 } else if (isConstant()) {
2072 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2073 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2074 return escript::algorithm(*leftC,operation,initial_value);
2075 } else if (isEmpty()) {
2076 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2077 } else if (isLazy()) {
2078 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2079 } else {
2080 throw DataException("Error - Data encapsulates an unknown type.");
2081 }
2082 }
2083
2084 /**
2085 \brief
2086 Perform the given data point reduction algorithm on data and return the result.
2087 Given operation combines each element within each data point into a scalar,
2088 thus argument object is a rank n Data object, and returned object is a
2089 rank 0 Data object.
2090 Calls escript::dp_algorithm.
2091 */
2092 template <class BinaryFunction>
2093 inline
2094 Data
2095 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2096 {
2097 if (isEmpty()) {
2098 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2099 }
2100 else if (isExpanded()) {
2101 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2102 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2103 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2104 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2105 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2106 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2107 return result;
2108 }
2109 else if (isTagged()) {
2110 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2111 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2112 DataTypes::ValueType defval(1);
2113 defval[0]=0;
2114 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2115 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2116 return Data(resultT); // note: the Data object now owns the resultT pointer
2117 }
2118 else if (isConstant()) {
2119 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2120 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2121 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2122 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2123 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2124 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2125 return result;
2126 } else if (isLazy()) {
2127 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2128 } else {
2129 throw DataException("Error - Data encapsulates an unknown type.");
2130 }
2131 }
2132
2133 /**
2134 \brief
2135 Compute a tensor operation with two Data objects
2136 \param arg_0 - Input - Data object
2137 \param arg_1 - Input - Data object
2138 \param operation - Input - Binary op functor
2139 */
2140 template <typename BinaryFunction>
2141 inline
2142 Data
2143 C_TensorBinaryOperation(Data const &arg_0,
2144 Data const &arg_1,
2145 BinaryFunction operation)
2146 {
2147 if (arg_0.isEmpty() || arg_1.isEmpty())
2148 {
2149 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2150 }
2151 if (arg_0.isLazy() || arg_1.isLazy())
2152 {
2153 throw DataException("Error - Operations not permitted on lazy data.");
2154 }
2155 // Interpolate if necessary and find an appropriate function space
2156 Data arg_0_Z, arg_1_Z;
2157 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2158 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2159 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2160 arg_1_Z = Data(arg_1);
2161 }
2162 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2163 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2164 arg_0_Z =Data(arg_0);
2165 }
2166 else {
2167 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2168 }
2169 } else {
2170 arg_0_Z = Data(arg_0);
2171 arg_1_Z = Data(arg_1);
2172 }
2173 // Get rank and shape of inputs
2174 int rank0 = arg_0_Z.getDataPointRank();
2175 int rank1 = arg_1_Z.getDataPointRank();
2176 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2177 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2178 int size0 = arg_0_Z.getDataPointSize();
2179 int size1 = arg_1_Z.getDataPointSize();
2180 // Declare output Data object
2181 Data res;
2182
2183 if (shape0 == shape1) {
2184 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2185 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2186 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2187 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2188 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2189
2190 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2191 }
2192 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2193
2194 // Prepare the DataConstant input
2195 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2196
2197 // Borrow DataTagged input from Data object
2198 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2199
2200 // Prepare a DataTagged output 2
2201 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2202 res.tag();
2203 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2204
2205 // Prepare offset into DataConstant
2206 int offset_0 = tmp_0->getPointOffset(0,0);
2207 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2208
2209 // Get the pointers to the actual data
2210 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2211 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2212
2213 // Compute a result for the default
2214 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2215 // Compute a result for each tag
2216 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2217 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2218 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2219 tmp_2->addTag(i->first);
2220 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2221 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2222
2223 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2224 }
2225
2226 }
2227 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2228 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2229 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2230 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2231 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2232
2233 int sampleNo_1,dataPointNo_1;
2234 int numSamples_1 = arg_1_Z.getNumSamples();
2235 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2236 int offset_0 = tmp_0->getPointOffset(0,0);
2237 res.requireWrite();
2238 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2239 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2240 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2241 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2242 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2243 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2244 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2245 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2246 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2247 }
2248 }
2249
2250 }
2251 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2252 // Borrow DataTagged input from Data object
2253 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2254
2255 // Prepare the DataConstant input
2256 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2257
2258 // Prepare a DataTagged output 2
2259 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2260 res.tag();
2261 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2262
2263 // Prepare offset into DataConstant
2264 int offset_1 = tmp_1->getPointOffset(0,0);
2265
2266 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2267 // Get the pointers to the actual data
2268 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2269 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2270 // Compute a result for the default
2271 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2272 // Compute a result for each tag
2273 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2274 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2275 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2276 tmp_2->addTag(i->first);
2277 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2278 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2279 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2280 }
2281
2282 }
2283 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2284 // Borrow DataTagged input from Data object
2285 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2286
2287 // Borrow DataTagged input from Data object
2288 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2289
2290 // Prepare a DataTagged output 2
2291 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2292 res.tag(); // DataTagged output
2293 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2294
2295 // Get the pointers to the actual data
2296 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2297 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2298 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2299
2300 // Compute a result for the default
2301 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2302 // Merge the tags
2303 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2304 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2305 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2306 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2307 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2308 }
2309 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2310 tmp_2->addTag(i->first);
2311 }
2312 // Compute a result for each tag
2313 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2314 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2315
2316 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2317 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2318 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2319
2320 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2321 }
2322
2323 }
2324 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2325 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2326 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2327 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2328 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2329 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2330
2331 int sampleNo_0,dataPointNo_0;
2332 int numSamples_0 = arg_0_Z.getNumSamples();
2333 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2334 res.requireWrite();
2335 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2336 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2337 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2338 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2339 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2340 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2341 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2342 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2343 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2344 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2345 }
2346 }
2347
2348 }
2349 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2350 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2351 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2352 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2353 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2354
2355 int sampleNo_0,dataPointNo_0;
2356 int numSamples_0 = arg_0_Z.getNumSamples();
2357 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2358 int offset_1 = tmp_1->getPointOffset(0,0);
2359 res.requireWrite();
2360 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2361 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2362 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2363 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2364 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2365
2366 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2367 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2368 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2369
2370
2371 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2372 }
2373 }
2374
2375 }
2376 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2377 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2378 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2379 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2380 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2381 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2382
2383 int sampleNo_0,dataPointNo_0;
2384 int numSamples_0 = arg_0_Z.getNumSamples();
2385 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2386 res.requireWrite();
2387 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2388 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2389 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2390 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2391 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2392 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2393 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2394 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2395 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2396 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2397 }
2398 }
2399
2400 }
2401 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2402 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2403 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2404 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2405 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2406 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2407
2408 int sampleNo_0,dataPointNo_0;
2409 int numSamples_0 = arg_0_Z.getNumSamples();
2410 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2411 res.requireWrite();
2412 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2413 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2414 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2415 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2416 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2417 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2418 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2419 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2420 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2421 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2422 }
2423 }
2424
2425 }
2426 else {
2427 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2428 }
2429
2430 } else if (0 == rank0) {
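// arg_0 is rank 0 (a single value per data point): broadcast that value
// against every component of arg_1, so the result takes shape1.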
2431 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2432 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2433 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2434 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2435 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2436 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2437 }
2438 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2439
2440 // Prepare the DataConstant input
2441 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2442
2443 // Borrow DataTagged input from Data object
2444 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2445
2446 // Prepare a DataTagged output 2
2447 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2448 res.tag();
2449 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2450
2451 // Prepare offset into DataConstant
2452 int offset_0 = tmp_0->getPointOffset(0,0);
2453 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2454
2455 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2456 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2457
2458 // Compute a result for the default
2459 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2460 // Compute a result for each tag
2461 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2462 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2463 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2464 tmp_2->addTag(i->first);
2465 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2466 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2467 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2468 }
2469
2470 }
2471 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2472
2473 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2474 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2475 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2476 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2477
2478 int sampleNo_1,dataPointNo_1;
2479 int numSamples_1 = arg_1_Z.getNumSamples();
2480 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2481 int offset_0 = tmp_0->getPointOffset(0,0);
2482 res.requireWrite();
2483 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2484 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2485 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2486 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2487 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2488 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2489 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2490 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2491 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2492
2493 }
2494 }
2495
2496 }
2497 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2498
2499 // Borrow DataTagged input from Data object
2500 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2501
2502 // Prepare the DataConstant input
2503 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2504
2505 // Prepare a DataTagged output 2
2506 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2507 res.tag();
2508 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2509
2510 // Prepare offset into DataConstant
2511 int offset_1 = tmp_1->getPointOffset(0,0);
2512 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2513
2514 // Get the pointers to the actual data
2515 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2516 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2517
2518
2519 // Compute a result for the default
2520 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2521 // Compute a result for each tag
2522 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2523 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2524 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2525 tmp_2->addTag(i->first);
2526 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2527 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2528
2529 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2530 }
2531
2532 }
2533 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2534
2535 // Borrow DataTagged input from Data object
2536 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2537
2538 // Borrow DataTagged input from Data object
2539 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2540
2541 // Prepare a DataTagged output 2
2542 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2543 res.tag(); // DataTagged output
2544 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2545
2546 // Get the pointers to the actual data
2547 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2548 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2549 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2550
2551 // Compute a result for the default
2552 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2553 // Merge the tags
2554 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2555 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2556 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2557 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2558 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2559 }
2560 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2561 tmp_2->addTag(i->first);
2562 }
2563 // Compute a result for each tag
2564 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2565 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2566 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2567 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2568 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2569
2570 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2571 }
2572
2573 }
2574 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2575
2576 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2577 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2578 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2579 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2580 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2581
2582 int sampleNo_0,dataPointNo_0;
2583 int numSamples_0 = arg_0_Z.getNumSamples();
2584 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2585 res.requireWrite();
2586 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2587 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2588 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2589 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2590 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2591 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2592 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2593 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2594 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2595 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2596 }
2597 }
2598
2599 }
2600 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2601 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2602 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2603 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2604 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2605
2606 int sampleNo_0,dataPointNo_0;
2607 int numSamples_0 = arg_0_Z.getNumSamples();
2608 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2609 int offset_1 = tmp_1->getPointOffset(0,0);
2610 res.requireWrite();
2611 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2612 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2613 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2614 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2615 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2616 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2617 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2618 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2619 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2620 }
2621 }
2622
2623
2624 }
2625 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2626
2627 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2628 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2629 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2630 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2631 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2632
2633 int sampleNo_0,dataPointNo_0;
2634 int numSamples_0 = arg_0_Z.getNumSamples();
2635 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2636 res.requireWrite();
2637 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2638 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2639 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2640 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2641 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2642 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2643 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2644 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2645 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2646 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2647 }
2648 }
2649
2650 }
2651 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2652
2653 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2654 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2655 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2656 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2657 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2658
2659 int sampleNo_0,dataPointNo_0;
2660 int numSamples_0 = arg_0_Z.getNumSamples();
2661 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2662 res.requireWrite();
2663 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2664 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2665 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2666 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2667 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2668 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2669 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2670 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2671 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2672 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2673 }
2674 }
2675
2676 }
2677 else {
2678 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2679 }
2680
2681 } else if (0 == rank1) {
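// arg_1 is rank 0 (a single value per data point): broadcast that value
// against every component of arg_0, so the result takes shape0.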
2682 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2683 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2684 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2685 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2686 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2687 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2688 }
2689 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2690
2691 // Prepare the DataConstant input
2692 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2693
2694 // Borrow DataTagged input from Data object
2695 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2696
2697 // Prepare a DataTagged output 2
2698 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2699 res.tag();
2700 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2701
2702 // Prepare offset into DataConstant
2703 int offset_0 = tmp_0->getPointOffset(0,0);
2704 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2705
2706 // Get the pointers to the actual data
2707 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2708 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2709
2710 // Compute a result for the default
2711 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2712 // Compute a result for each tag
2713 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2714 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2715 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2716 tmp_2->addTag(i->first);
2717 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2718 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2719 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2720 }
2721 }
2722 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2723
2724 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2725 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2726 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2727 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2728
2729 int sampleNo_1,dataPointNo_1;
2730 int numSamples_1 = arg_1_Z.getNumSamples();
2731 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2732 int offset_0 = tmp_0->getPointOffset(0,0);
2733 res.requireWrite();
2734 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2735 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2736 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2737 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2738 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2739 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2740 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2741 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2742 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2743 }
2744 }
2745
2746 }
2747 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2748
2749 // Borrow DataTagged input from Data object
2750 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2751
2752 // Prepare the DataConstant input
2753 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2754
2755 // Prepare a DataTagged output 2
2756 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2757 res.tag();
2758 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2759
2760 // Prepare offset into DataConstant
2761 int offset_1 = tmp_1->getPointOffset(0,0);
2762 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2763 // Get the pointers to the actual data
2764 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2765 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2766 // Compute a result for the default
2767 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2768 // Compute a result for each tag
2769 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2770 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2771 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2772 tmp_2->addTag(i->first);
2773 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2774 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2775 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2776 }
2777
2778 }
2779 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2780
2781 // Borrow DataTagged input from Data object
2782 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2783
2784 // Borrow DataTagged input from Data object
2785 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2786
2787 // Prepare a DataTagged output 2
2788 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2789 res.tag(); // DataTagged output
2790 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2791
2792 // Get the pointers to the actual data
2793 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2794 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2795 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2796
2797 // Compute a result for the default
2798 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2799 // Merge the tags
2800 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2801 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2802 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2803 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2804 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2805 }
2806 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2807 tmp_2->addTag(i->first);
2808 }
2809 // Compute a result for each tag
2810 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2811 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2812 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2813 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2814 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2815 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2816 }
2817
2818 }
2819 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2820
2821 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2822 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2823 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2824 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2825 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2826
2827 int sampleNo_0,dataPointNo_0;
2828 int numSamples_0 = arg_0_Z.getNumSamples();
2829 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2830 res.requireWrite();
2831 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2832 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2833 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2834 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2835 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2836 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2837 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2838 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2839 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2840 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2841 }
2842 }
2843
2844 }
2845 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2846 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2847 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2848 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2849 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2850
2851 int sampleNo_0,dataPointNo_0;
2852 int numSamples_0 = arg_0_Z.getNumSamples();
2853 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2854 int offset_1 = tmp_1->getPointOffset(0,0);
2855 res.requireWrite();
2856 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2857 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2858 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2859 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2860 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2861 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2862 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2863 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2864 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2865 }
2866 }
2867
2868
2869 }
2870 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2871
2872 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2873 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2874 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2875 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2876 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2877
2878 int sampleNo_0,dataPointNo_0;
2879 int numSamples_0 = arg_0_Z.getNumSamples();
2880 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2881 res.requireWrite();
2882 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2883 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2884 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2885 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2886 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2887 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2888 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2889 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2890 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2891 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2892 }
2893 }
2894
2895 }
2896 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2897
2898 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2899 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2900 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2901 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2902 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2903
2904 int sampleNo_0,dataPointNo_0;
2905 int numSamples_0 = arg_0_Z.getNumSamples();
2906 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2907 res.requireWrite();
2908 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2909 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2910 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2911 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2912 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2913 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2914 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2915 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2916 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2917 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2918 }
2919 }
2920
2921 }
2922 else {
2923 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2924 }
2925
2926 } else {
2927 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2928 }
2929
2930 return res;
2931 }
2932
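/**
   \brief
   Apply a unary operation to every value of a Data object, producing a new
   Data object with the same shape and function space.
   \param arg_0 - Input - Data object
   \param operation - Input - unary functor applied to each data point value
          (for example a double(*)(double) such as ::fabs; illustrative only)
*/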
2933 template <typename UnaryFunction>
2934 Data
2935 C_TensorUnaryOperation(Data const &arg_0,
2936 UnaryFunction operation)
2937 {
2938 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2939 {
2940 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2941 }
2942 if (arg_0.isLazy())
2943 {
2944 throw DataException("Error - Operations not permitted on lazy data.");
2945 }
2946 // Only one argument, so no interpolation is needed; work on a shallow copy
2947 Data arg_0_Z = Data(arg_0);
2948
2949 // Get rank and shape of inputs
2950 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2951 int size0 = arg_0_Z.getDataPointSize();
2952
2953 // Declare output Data object
2954 Data res;
2955
2956 if (arg_0_Z.isConstant()) {
2957 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2958 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2959 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2960 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2961 }
2962 else if (arg_0_Z.isTagged()) {
2963
2964 // Borrow DataTagged input from Data object
2965 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2966
2967 // Prepare a DataTagged output 2
2968 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2969 res.tag();
2970 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2971
2972 // Get the pointers to the actual data
2973 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2974 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2975 // Compute a result for the default
2976 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2977 // Compute a result for each tag
2978 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2979 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2980 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2981 tmp_2->addTag(i->first);
2982 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2983 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2984 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2985 }
2986
2987 }
2988 else if (arg_0_Z.isExpanded()) {
2989
2990 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2991 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2992 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2993
2994 int sampleNo_0,dataPointNo_0;
2995 int numSamples_0 = arg_0_Z.getNumSamples();
2996 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2997 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2998 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2999 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3000 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3001 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3002 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3003 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3004 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3005 }
3006 }
3007 }
3008 else {
3009 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3010 }
3011
3012 return res;
3013 }
3014
3015 }
3016 #endif
