Contents of /trunk/escript/src/Data.h

Revision 2735 - Mon Nov 2 02:03:24 2009 UTC by jfenwick
File MIME type: text/plain, file size: 92869 byte(s)
Fixed bug where calling minval() reintroduced laziness after it had been removed.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2009 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (e.g. getDataPointShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData to "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
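/* Illustrative sketch (not part of the original header): the copy constructor is
   documented above as a shallow copy, while copySelf() returns an independent deep copy.
   The scalar-constant constructor used below is the Data(double, ...) overload declared above.

     escript::Data a(1.0);               // scalar constant
     escript::Data b(a);                 // shallow copy - shares the underlying DataAbstract
     escript::Data c = a.copySelf();     // deep copy - independent storage
*/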
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
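/* Illustrative sketch (not part of the original header) of the lazy-evaluation round trip,
   using only methods declared in this class:

     escript::Data d(2.0);         // ready (constant) data
     d.delaySelf();                // convert to lazy data
     bool lazy = d.isLazy();       // true
     d.resolve();                  // evaluate the expression; d is ready again
     bool ready = d.isReady();     // true
*/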
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true, if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from a array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for LazyData.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - A buffer in which to compute (and store) the sample data will be selected from this group.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
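/* Illustrative sketch (not part of the original header) of the write-access pattern described
   for requireWrite() above. The number of values per sample is assumed here to be
   getNumDataPointsPerSample()*getDataPointSize(), which is the layout for expanded data.

     escript::Data d = ...;                  // some expanded Data object
     d.requireWrite();                       // single-threaded: resolve laziness, un-share storage
     const int numSamples = d.getNumSamples();
     const int valsPerSample = d.getNumDataPointsPerSample()*d.getDataPointSize();
     #pragma omp parallel for
     for (int s = 0; s < numSamples; ++s) {
         escript::DataAbstract::ValueType::value_type* p = d.getSampleDataRW(s);
         for (int k = 0; k < valsPerSample; ++k) {
             p[k] *= 2.0;                    // scale every value in the sample
         }
     }
*/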
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687 /**
688 \brief Return true if this object contains no samples.
689 This is not the same as isEmpty()
690 */
691 ESCRIPT_DLL_API
692 bool
693 hasNoSamples() const
694 {
695 return getLength()==0;
696 }
697
698 /**
699 \brief
700 Assign the given value to the tag associated with name. Implicitly converts this
701 object to type DataTagged. Throws an exception if this object
702 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
703 \param name - Input - name of tag.
704 \param value - Input - Value to associate with given key.
705 */
706 ESCRIPT_DLL_API
707 void
708 setTaggedValueByName(std::string name,
709 const boost::python::object& value);
710
711 /**
712 \brief
713 Assign the given value to the tag. Implicitly converts this
714 object to type DataTagged if it is constant.
715
716 \param tagKey - Input - Integer key.
717 \param value - Input - Value to associate with given key.
718 *
719 */
720 ESCRIPT_DLL_API
721 void
722 setTaggedValue(int tagKey,
723 const boost::python::object& value);
724
725 /**
726 \brief
727 Assign the given value to the tag. Implicitly converts this
728 object to type DataTagged if it is constant.
729
730 \param tagKey - Input - Integer key.
731 \param pointshape - Input - The shape of the value parameter
732 \param value - Input - Value to associate with given key.
733 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
734 */
735 ESCRIPT_DLL_API
736 void
737 setTaggedValueFromCPP(int tagKey,
738 const DataTypes::ShapeType& pointshape,
739 const DataTypes::ValueType& value,
740 int dataOffset=0);
741
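/* Illustrative sketch (not part of the original header): building tagged data from C++.
   The ValueType usage mirrors the dp_algorithm() implementation further down in this file;
   the tag key 10 is an arbitrary example value.

     escript::Data d(0.0);                              // constant scalar
     d.tag();                                           // convert to DataTagged
     escript::DataTypes::ValueType v(1);                // storage for one scalar value
     v[0] = 42.0;
     d.setTaggedValueFromCPP(10, escript::DataTypes::scalarShape, v);   // tag 10 now maps to 42.0
*/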
742
743
744 /**
745 \brief
746 Copy other Data object into this Data object where mask is positive.
747 */
748 ESCRIPT_DLL_API
749 void
750 copyWithMask(const Data& other,
751 const Data& mask);
752
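/* Illustrative sketch (not part of the original header): combining the where* operations
   declared below with copyWithMask() to overwrite only selected data points.

     escript::Data a = ...;                   // some Data object
     escript::Data b = ...;                   // replacement values, compatible with a
     escript::Data mask = a.whereNegative();  // 1 where a < 0, 0 elsewhere
     a.copyWithMask(b, mask);                 // a takes b's values wherever the mask is positive
*/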
753 /**
754 Data object operation methods and operators.
755 */
756
757 /**
758 \brief
759 set all values to zero
760 *
761 */
762 ESCRIPT_DLL_API
763 void
764 setToZero();
765
766 /**
767 \brief
768 Interpolates this onto the given functionspace and returns
769 the result as a Data object.
770 *
771 */
772 ESCRIPT_DLL_API
773 Data
774 interpolate(const FunctionSpace& functionspace) const;
775
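/* Illustrative sketch (not part of the original header): probeInterpolation() (declared
   further down) can be used to test whether interpolate() will succeed for a target
   FunctionSpace before calling it.

     escript::Data d = ...;                   // some Data object
     escript::FunctionSpace target = ...;     // the desired function space
     if (d.probeInterpolation(target)) {
         escript::Data di = d.interpolate(target);
     }
*/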
776
777 ESCRIPT_DLL_API
778 Data
779 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
780 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
781
782 ESCRIPT_DLL_API
783 Data
784 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
785 double undef,bool check_boundaries);
786
787
788
789
790 ESCRIPT_DLL_API
791 Data
792 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
793 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
794
795 ESCRIPT_DLL_API
796 Data
797 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
798 double undef,bool check_boundaries);
799
800 /**
801 \brief
802 Calculates the gradient of the data at the data points of functionspace.
803 If functionspace is not present the function space of Function(getDomain()) is used.
804 *
805 */
806 ESCRIPT_DLL_API
807 Data
808 gradOn(const FunctionSpace& functionspace) const;
809
810 ESCRIPT_DLL_API
811 Data
812 grad() const;
813
814 /**
815 \brief
816 Calculate the integral over the function space domain as a python tuple.
817 */
818 ESCRIPT_DLL_API
819 boost::python::object
820 integrateToTuple_const() const;
821
822
823 /**
824 \brief
825 Calculate the integral over the function space domain as a python tuple.
826 */
827 ESCRIPT_DLL_API
828 boost::python::object
829 integrateToTuple();
830
831
832
833 /**
834 \brief
835 Returns 1./ Data object
836 *
837 */
838 ESCRIPT_DLL_API
839 Data
840 oneOver() const;
841 /**
842 \brief
843 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
844 *
845 */
846 ESCRIPT_DLL_API
847 Data
848 wherePositive() const;
849
850 /**
851 \brief
852 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
853 *
854 */
855 ESCRIPT_DLL_API
856 Data
857 whereNegative() const;
858
859 /**
860 \brief
861 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
862 *
863 */
864 ESCRIPT_DLL_API
865 Data
866 whereNonNegative() const;
867
868 /**
869 \brief
870 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
871 *
872 */
873 ESCRIPT_DLL_API
874 Data
875 whereNonPositive() const;
876
877 /**
878 \brief
879 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
880 *
881 */
882 ESCRIPT_DLL_API
883 Data
884 whereZero(double tol=0.0) const;
885
886 /**
887 \brief
888 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
889 *
890 */
891 ESCRIPT_DLL_API
892 Data
893 whereNonZero(double tol=0.0) const;
894
895 /**
896 \brief
897 Return the maximum absolute value of this Data object.
898
899 The method is not const because lazy data needs to be expanded before Lsup can be computed.
900 The _const form can be used when the Data object is const, however this will only work for
901 Data which is not Lazy.
902
903 For Data which contain no samples (or tagged Data for which no tags in use have a value)
904 zero is returned.
905 */
906 ESCRIPT_DLL_API
907 double
908 Lsup();
909
910 ESCRIPT_DLL_API
911 double
912 Lsup_const() const;
913
914
915 /**
916 \brief
917 Return the maximum value of this Data object.
918
919 The method is not const because lazy data needs to be expanded before sup can be computed.
920 The _const form can be used when the Data object is const, however this will only work for
921 Data which is not Lazy.
922
923 For Data which contain no samples (or tagged Data for which no tags in use have a value)
924 a large negative value is returned.
925 */
926 ESCRIPT_DLL_API
927 double
928 sup();
929
930 ESCRIPT_DLL_API
931 double
932 sup_const() const;
933
934
935 /**
936 \brief
937 Return the minimum value of this Data object.
938
939 The method is not const because lazy data needs to be expanded before inf can be computed.
940 The _const form can be used when the Data object is const, however this will only work for
941 Data which is not Lazy.
942
943 For Data which contain no samples (or tagged Data for which no tags in use have a value)
944 a large positive value is returned.
945 */
946 ESCRIPT_DLL_API
947 double
948 inf();
949
950 ESCRIPT_DLL_API
951 double
952 inf_const() const;
953
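/* Illustrative sketch (not part of the original header): a typical convergence check
   using Lsup() together with the free operator- declared near the end of this file.

     escript::Data u_new = ...;
     escript::Data u_old = ...;
     double change = (u_new - u_old).Lsup();       // maximum absolute difference
     double scale  = u_new.Lsup();
     bool converged = (scale > 0.) ? (change/scale < 1.e-8) : (change < 1.e-8);
*/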
954
955
956 /**
957 \brief
958 Return the absolute value of each data point of this Data object.
959 *
960 */
961 ESCRIPT_DLL_API
962 Data
963 abs() const;
964
965 /**
966 \brief
967 Return the maximum value of each data point of this Data object.
968 *
969 */
970 ESCRIPT_DLL_API
971 Data
972 maxval() const;
973
974 /**
975 \brief
976 Return the minimum value of each data point of this Data object.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 minval() const;
982
983 /**
984 \brief
985 Return the (sample number, data-point number) of the data point with
986 the minimum component value in this Data object.
987 \note If you are working in python, please consider using Locator
988 instead of manually manipulating process and point IDs.
989 */
990 ESCRIPT_DLL_API
991 const boost::python::tuple
992 minGlobalDataPoint() const;
993
994 /**
995 \brief
996 Return the (sample number, data-point number) of the data point with
997 the minimum component value in this Data object.
998 \note If you are working in python, please consider using Locator
999 instead of manually manipulating process and point IDs.
1000 */
1001 ESCRIPT_DLL_API
1002 const boost::python::tuple
1003 maxGlobalDataPoint() const;
1004
1005
1006
1007 /**
1008 \brief
1009 Return the sign of each data point of this Data object.
1010 -1 for negative values, zero for zero values, 1 for positive values.
1011 *
1012 */
1013 ESCRIPT_DLL_API
1014 Data
1015 sign() const;
1016
1017 /**
1018 \brief
1019 Return the symmetric part of a matrix, i.e. (the matrix plus its transpose)/2.
1020 *
1021 */
1022 ESCRIPT_DLL_API
1023 Data
1024 symmetric() const;
1025
1026 /**
1027 \brief
1028 Return the nonsymmetric part of a matrix, i.e. (the matrix minus its transpose)/2.
1029 *
1030 */
1031 ESCRIPT_DLL_API
1032 Data
1033 nonsymmetric() const;
1034
1035 /**
1036 \brief
1037 Return the trace of a matrix
1038 *
1039 */
1040 ESCRIPT_DLL_API
1041 Data
1042 trace(int axis_offset) const;
1043
1044 /**
1045 \brief
1046 Transpose each data point of this Data object around the given axis.
1047 *
1048 */
1049 ESCRIPT_DLL_API
1050 Data
1051 transpose(int axis_offset) const;
1052
1053 /**
1054 \brief
1055 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1056 Currently this function is restricted to rank 2, square shape, and dimension 3.
1057 *
1058 */
1059 ESCRIPT_DLL_API
1060 Data
1061 eigenvalues() const;
1062
1063 /**
1064 \brief
1065 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1066 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1067 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1068 first non-zero entry is positive.
1069 Currently this function is restricted to rank 2, square shape, and dimension 3.
1070 *
1071 */
1072 ESCRIPT_DLL_API
1073 const boost::python::tuple
1074 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1075
1076 /**
1077 \brief
1078 swaps the components axis0 and axis1
1079 *
1080 */
1081 ESCRIPT_DLL_API
1082 Data
1083 swapaxes(const int axis0, const int axis1) const;
1084
1085 /**
1086 \brief
1087 Return the error function erf of each data point of this Data object.
1088 *
1089 */
1090 ESCRIPT_DLL_API
1091 Data
1092 erf() const;
1093
1094 /**
1095 \brief
1096 Return the sin of each data point of this Data object.
1097 *
1098 */
1099 ESCRIPT_DLL_API
1100 Data
1101 sin() const;
1102
1103 /**
1104 \brief
1105 Return the cos of each data point of this Data object.
1106 *
1107 */
1108 ESCRIPT_DLL_API
1109 Data
1110 cos() const;
1111
1112 /**
1113 \brief
1114 Return the tan of each data point of this Data object.
1115 *
1116 */
1117 ESCRIPT_DLL_API
1118 Data
1119 tan() const;
1120
1121 /**
1122 \brief
1123 Return the asin of each data point of this Data object.
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 Data
1128 asin() const;
1129
1130 /**
1131 \brief
1132 Return the acos of each data point of this Data object.
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 acos() const;
1138
1139 /**
1140 \brief
1141 Return the atan of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 atan() const;
1147
1148 /**
1149 \brief
1150 Return the sinh of each data point of this Data object.
1151 *
1152 */
1153 ESCRIPT_DLL_API
1154 Data
1155 sinh() const;
1156
1157 /**
1158 \brief
1159 Return the cosh of each data point of this Data object.
1160 *
1161 */
1162 ESCRIPT_DLL_API
1163 Data
1164 cosh() const;
1165
1166 /**
1167 \brief
1168 Return the tanh of each data point of this Data object.
1169 *
1170 */
1171 ESCRIPT_DLL_API
1172 Data
1173 tanh() const;
1174
1175 /**
1176 \brief
1177 Return the asinh of each data point of this Data object.
1178 *
1179 */
1180 ESCRIPT_DLL_API
1181 Data
1182 asinh() const;
1183
1184 /**
1185 \brief
1186 Return the acosh of each data point of this Data object.
1187 *
1188 */
1189 ESCRIPT_DLL_API
1190 Data
1191 acosh() const;
1192
1193 /**
1194 \brief
1195 Return the atanh of each data point of this Data object.
1196 *
1197 */
1198 ESCRIPT_DLL_API
1199 Data
1200 atanh() const;
1201
1202 /**
1203 \brief
1204 Return the log to base 10 of each data point of this Data object.
1205 *
1206 */
1207 ESCRIPT_DLL_API
1208 Data
1209 log10() const;
1210
1211 /**
1212 \brief
1213 Return the natural log of each data point of this Data object.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 log() const;
1219
1220 /**
1221 \brief
1222 Return the exponential function of each data point of this Data object.
1223 *
1224 */
1225 ESCRIPT_DLL_API
1226 Data
1227 exp() const;
1228
1229 /**
1230 \brief
1231 Return the square root of each data point of this Data object.
1232 *
1233 */
1234 ESCRIPT_DLL_API
1235 Data
1236 sqrt() const;
1237
1238 /**
1239 \brief
1240 Return the negation of each data point of this Data object.
1241 *
1242 */
1243 ESCRIPT_DLL_API
1244 Data
1245 neg() const;
1246
1247 /**
1248 \brief
1249 Return the identity of each data point of this Data object.
1250 Simply returns this object unmodified.
1251 *
1252 */
1253 ESCRIPT_DLL_API
1254 Data
1255 pos() const;
1256
1257 /**
1258 \brief
1259 Return the given power of each data point of this Data object.
1260
1261 \param right Input - the power to raise the object to.
1262 *
1263 */
1264 ESCRIPT_DLL_API
1265 Data
1266 powD(const Data& right) const;
1267
1268 /**
1269 \brief
1270 Return each data point of this Data object raised to the given power (a python object).
1271
1272 \param right Input - the power to raise the object to.
1273 *
1274 */
1275 ESCRIPT_DLL_API
1276 Data
1277 powO(const boost::python::object& right) const;
1278
1279 /**
1280 \brief
1281 Return the given python object (the base) raised to the power of each data point of this Data object.
1282
1283 \param left Input - the bases
1284 *
1285 */
1286
1287 ESCRIPT_DLL_API
1288 Data
1289 rpowO(const boost::python::object& left) const;
1290
1291 /**
1292 \brief
1293 writes the object to a file in the DX file format
1294 */
1295 ESCRIPT_DLL_API
1296 void
1297 saveDX(std::string fileName) const;
1298
1299 /**
1300 \brief
1301 writes the object to a file in the VTK file format
1302 */
1303 ESCRIPT_DLL_API
1304 void
1305 saveVTK(std::string fileName) const;
1306
1307
1308
1309 /**
1310 \brief
1311 Overloaded operator +=
1312 \param right - Input - The right hand side.
1313 *
1314 */
1315 ESCRIPT_DLL_API
1316 Data& operator+=(const Data& right);
1317 ESCRIPT_DLL_API
1318 Data& operator+=(const boost::python::object& right);
1319
1320 ESCRIPT_DLL_API
1321 Data& operator=(const Data& other);
1322
1323 /**
1324 \brief
1325 Overloaded operator -=
1326 \param right - Input - The right hand side.
1327 *
1328 */
1329 ESCRIPT_DLL_API
1330 Data& operator-=(const Data& right);
1331 ESCRIPT_DLL_API
1332 Data& operator-=(const boost::python::object& right);
1333
1334 /**
1335 \brief
1336 Overloaded operator *=
1337 \param right - Input - The right hand side.
1338 *
1339 */
1340 ESCRIPT_DLL_API
1341 Data& operator*=(const Data& right);
1342 ESCRIPT_DLL_API
1343 Data& operator*=(const boost::python::object& right);
1344
1345 /**
1346 \brief
1347 Overloaded operator /=
1348 \param right - Input - The right hand side.
1349 *
1350 */
1351 ESCRIPT_DLL_API
1352 Data& operator/=(const Data& right);
1353 ESCRIPT_DLL_API
1354 Data& operator/=(const boost::python::object& right);
1355
1356 /**
1357 \brief
1358 Returns true if this can be interpolated to functionspace.
1359 */
1360 ESCRIPT_DLL_API
1361 bool
1362 probeInterpolation(const FunctionSpace& functionspace) const;
1363
1364 /**
1365 Data object slicing methods.
1366 */
1367
1368 /**
1369 \brief
1370 Returns a slice from this Data object.
1371
1372 Description:
1373 Implements the [] get operator in python.
1374 Calls getSlice.
1375
1376 \param key - Input - python slice tuple specifying
1377 slice to return.
1378 */
1379 ESCRIPT_DLL_API
1380 Data
1381 getItem(const boost::python::object& key) const;
1382
1383 /**
1384 \brief
1385 Copies slice from value into this Data object.
1386
1387 Implements the [] set operator in python.
1388 Calls setSlice.
1389
1390 \param key - Input - python slice tuple specifying
1391 slice to copy from value.
1392 \param value - Input - Data object to copy from.
1393 */
1394 ESCRIPT_DLL_API
1395 void
1396 setItemD(const boost::python::object& key,
1397 const Data& value);
1398
1399 ESCRIPT_DLL_API
1400 void
1401 setItemO(const boost::python::object& key,
1402 const boost::python::object& value);
1403
1404 // The following public methods should be treated as private.
1405
1406 /**
1407 \brief
1408 Perform the given unary operation on every element of every data point in
1409 this Data object.
1410 */
1411 template <class UnaryFunction>
1412 ESCRIPT_DLL_API
1413 inline
1414 void
1415 unaryOp2(UnaryFunction operation);
1416
1417 /**
1418 \brief
1419 Return a Data object containing the specified slice of
1420 this Data object.
1421 \param region - Input - Region to copy.
1422 *
1423 */
1424 ESCRIPT_DLL_API
1425 Data
1426 getSlice(const DataTypes::RegionType& region) const;
1427
1428 /**
1429 \brief
1430 Copy the specified slice from the given value into this
1431 Data object.
1432 \param value - Input - Data to copy from.
1433 \param region - Input - Region to copy.
1434 *
1435 */
1436 ESCRIPT_DLL_API
1437 void
1438 setSlice(const Data& value,
1439 const DataTypes::RegionType& region);
1440
1441 /**
1442 \brief
1443 print the data values to stdout. Used for debugging
1444 */
1445 ESCRIPT_DLL_API
1446 void
1447 print(void);
1448
1449 /**
1450 \brief
1451 return the MPI rank number of the local data.
1452 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1453 is returned
1454 */
1455 ESCRIPT_DLL_API
1456 int
1457 get_MPIRank(void) const;
1458
1459 /**
1460 \brief
1461 return the number of MPI processes.
1462 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1463 is returned
1464 */
1465 ESCRIPT_DLL_API
1466 int
1467 get_MPISize(void) const;
1468
1469 /**
1470 \brief
1471 return the MPI communicator of the local data.
1472 MPI_COMM_WORLD is assumed and returned.
1473 */
1474 ESCRIPT_DLL_API
1475 MPI_Comm
1476 get_MPIComm(void) const;
1477
1478 /**
1479 \brief
1480 return the object produced by the factory, which is a DataConstant or DataExpanded
1481 TODO Ownership of this object should be explained in doco.
1482 */
1483 ESCRIPT_DLL_API
1484 DataAbstract*
1485 borrowData(void) const;
1486
1487 ESCRIPT_DLL_API
1488 DataAbstract_ptr
1489 borrowDataPtr(void) const;
1490
1491 ESCRIPT_DLL_API
1492 DataReady_ptr
1493 borrowReadyPtr(void) const;
1494
1495
1496
1497 /**
1498 \brief
1499 Return a pointer to the beginning of the datapoint at the specified offset.
1500 TODO Eventually these should be inlined.
1501 \param i - position(offset) in the underlying datastructure
1502 */
1503
1504 ESCRIPT_DLL_API
1505 DataTypes::ValueType::const_reference
1506 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1507
1508
1509 ESCRIPT_DLL_API
1510 DataTypes::ValueType::reference
1511 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1512
1513
1514
1515 /**
1516 \brief Create a buffer for use by getSampleDataRO.
1517 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1518 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1519
1520 In multi-threaded sections, this needs to be called on each thread.
1521
1522 \return A BufferGroup* if Data is lazy, NULL otherwise.
1523 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1524 */
1525 ESCRIPT_DLL_API
1526 BufferGroup*
1527 allocSampleBuffer() const;
1528
1529 /**
1530 \brief Free a buffer allocated with allocSampleBuffer.
1531 \param buffer Input - pointer to the buffer to deallocate.
1532 */
1533 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
1534
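/* Illustrative sketch (not part of the original header) of the read-access pattern for
   possibly-lazy data using allocSampleBuffer()/freeSampleBuffer() with getSampleDataRO().
   allocSampleBuffer() returns NULL for non-lazy data; whether freeSampleBuffer() accepts a
   NULL pointer is an assumption made for brevity here.

     escript::Data d = ...;                                  // possibly lazy Data object
     escript::BufferGroup* bg = d.allocSampleBuffer();       // NULL unless d is lazy
     const int numSamples = d.getNumSamples();
     for (int s = 0; s < numSamples; ++s) {
         const escript::DataAbstract::ValueType::value_type* p = d.getSampleDataRO(s, bg);
         // ... read-only use of p ...
     }
     d.freeSampleBuffer(bg);
*/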
1535 protected:
1536
1537 private:
1538
1539 template <class BinaryOp>
1540 double
1541 #ifdef PASO_MPI
1542 lazyAlgWorker(double init, MPI_Op mpiop_type);
1543 #else
1544 lazyAlgWorker(double init);
1545 #endif
1546
1547 double
1548 LsupWorker() const;
1549
1550 double
1551 supWorker() const;
1552
1553 double
1554 infWorker() const;
1555
1556 boost::python::object
1557 integrateWorker() const;
1558
1559 void
1560 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1561
1562 void
1563 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1564
1565 // For internal use in Data.cpp only!
1566 // other uses should call the main entry points and allow laziness
1567 Data
1568 minval_nonlazy() const;
1569
1570 // For internal use in Data.cpp only!
1571 Data
1572 maxval_nonlazy() const;
1573
1574
1575 /**
1576 \brief
1577 Check *this and the right operand are compatible. Throws
1578 an exception if they aren't.
1579 \param right - Input - The right hand side.
1580 */
1581 inline
1582 void
1583 operandCheck(const Data& right) const
1584 {
1585 return m_data->operandCheck(*(right.m_data.get()));
1586 }
1587
1588 /**
1589 \brief
1590 Perform the specified reduction algorithm on every element of every data point in
1591 this Data object according to the given function and return the single value result.
1592 */
1593 template <class BinaryFunction>
1594 inline
1595 double
1596 algorithm(BinaryFunction operation,
1597 double initial_value) const;
1598
1599 /**
1600 \brief
1601 Reduce each data-point in this Data object using the given operation. Return a Data
1602 object with the same number of data-points, but with each data-point containing only
1603 one value - the result of the reduction operation on the corresponding data-point in
1604 this Data object
1605 */
1606 template <class BinaryFunction>
1607 inline
1608 Data
1609 dp_algorithm(BinaryFunction operation,
1610 double initial_value) const;
1611
1612 /**
1613 \brief
1614 Perform the given binary operation on all of the data's elements.
1615 The underlying type of the right hand side (right) determines the final
1616 type of *this after the operation. For example if the right hand side
1617 is expanded *this will be expanded if necessary.
1618 RHS is a Data object.
1619 */
1620 template <class BinaryFunction>
1621 inline
1622 void
1623 binaryOp(const Data& right,
1624 BinaryFunction operation);
1625
1626 /**
1627 \brief
1628 Convert the data type of the RHS to match this.
1629 \param right - Input - data type to match.
1630 */
1631 void
1632 typeMatchLeft(Data& right) const;
1633
1634 /**
1635 \brief
1636 Convert the data type of this to match the RHS.
1637 \param right - Input - data type to match.
1638 */
1639 void
1640 typeMatchRight(const Data& right);
1641
1642 /**
1643 \brief
1644 Construct a Data object of the appropriate type.
1645 */
1646
1647 void
1648 initialise(const DataTypes::ValueType& value,
1649 const DataTypes::ShapeType& shape,
1650 const FunctionSpace& what,
1651 bool expanded);
1652
1653 void
1654 initialise(const WrappedArray& value,
1655 const FunctionSpace& what,
1656 bool expanded);
1657
1658 //
1659 // flag to protect the data object against any update
1660 bool m_protected;
1661 mutable bool m_shared;
1662 bool m_lazy;
1663
1664 //
1665 // pointer to the actual data object
1666 // boost::shared_ptr<DataAbstract> m_data;
1667 DataAbstract_ptr m_data;
1668
1669 // If possible please use getReadyPtr instead.
1670 // But see warning below.
1671 const DataReady*
1672 getReady() const;
1673
1674 DataReady*
1675 getReady();
1676
1677
1678 // Be wary of using this for local operations since it (temporarily) increases reference count.
1679 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1680 // getReady() instead
1681 DataReady_ptr
1682 getReadyPtr();
1683
1684 const_DataReady_ptr
1685 getReadyPtr() const;
1686
1687
1688 /**
1689 \brief Update the Data's shared flag
1690 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1691 For internal use only.
1692 */
1693 void updateShareStatus(bool nowshared) const
1694 {
1695 m_shared=nowshared; // m_shared is mutable
1696 }
1697
1698 // In the isShared() method below:
1699 // A problem would occur if m_data (the address pointed to) were being modified
1700 // while the call m_data->is_shared is being executed.
1701 //
1702 // Q: So why do I think this code can be thread safe/correct?
1703 // A: We need to make some assumptions.
1704 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1705 // 2. We assume that no constructions or assignments which would share previously unshared data
1706 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1707 //
1708 // This means that the only transition we need to consider, is when a previously shared object is
1709 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1710 // In those cases the m_shared flag changes to false after m_data has completed changing.
1711 // For any threads executing before the flag switches they will assume the object is still shared.
1712 bool isShared() const
1713 {
1714 return m_shared;
1715 /* if (m_shared) return true;
1716 if (m_data->isShared())
1717 {
1718 updateShareStatus(true);
1719 return true;
1720 }
1721 return false;*/
1722 }
1723
1724 void forceResolve()
1725 {
1726 if (isLazy())
1727 {
1728 #ifdef _OPENMP
1729 if (omp_in_parallel())
1730 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1731 throw DataException("Please do not call forceResolve() in a parallel region.");
1732 }
1733 #endif
1734 resolve();
1735 }
1736 }
1737
1738 /**
1739 \brief If another object is sharing our member data, make a copy to work with instead.
1740 This code should only be called from single threaded sections of code.
1741 */
1742 void exclusiveWrite()
1743 {
1744 #ifdef _OPENMP
1745 if (omp_in_parallel())
1746 {
1747 // *((int*)0)=17;
1748 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1749 }
1750 #endif
1751 forceResolve();
1752 if (isShared())
1753 {
1754 DataAbstract* t=m_data->deepCopy();
1755 set_m_data(DataAbstract_ptr(t));
1756 }
1757 }
1758
1759 /**
1760 \brief checks if caller can have exclusive write to the object
1761 */
1762 void checkExclusiveWrite()
1763 {
1764 if (isLazy() || isShared())
1765 {
1766 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1767 }
1768 }
1769
1770 /**
1771 \brief Modify the data abstract hosted by this Data object
1772 For internal use only.
1773 Passing a pointer to null is permitted (do this in the destructor)
1774 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1775 */
1776 void set_m_data(DataAbstract_ptr p);
1777
1778 friend class DataAbstract; // To allow calls to updateShareStatus
1779
1780 };
1781
1782 } // end namespace escript
1783
1784
1785 // No, this is not supposed to be at the top of the file
1786 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1787 // so that I can dynamic cast between them below.
1788 #include "DataReady.h"
1789 #include "DataLazy.h"
1790
1791 namespace escript
1792 {
1793
1794 inline
1795 const DataReady*
1796 Data::getReady() const
1797 {
1798 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1799 EsysAssert((dr!=0), "Error - casting to DataReady.");
1800 return dr;
1801 }
1802
1803 inline
1804 DataReady*
1805 Data::getReady()
1806 {
1807 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1808 EsysAssert((dr!=0), "Error - casting to DataReady.");
1809 return dr;
1810 }
1811
1812 // Be wary of using this for local operations since it (temporarily) increases reference count.
1813 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1814 // getReady() instead
1815 inline
1816 DataReady_ptr
1817 Data::getReadyPtr()
1818 {
1819 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1820 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1821 return dr;
1822 }
1823
1824
1825 inline
1826 const_DataReady_ptr
1827 Data::getReadyPtr() const
1828 {
1829 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1830 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1831 return dr;
1832 }
1833
1834 inline
1835 DataAbstract::ValueType::value_type*
1836 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1837 {
1838 if (isLazy())
1839 {
1840 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1841 }
1842 return getReady()->getSampleDataRW(sampleNo);
1843 }
1844
1845 inline
1846 const DataAbstract::ValueType::value_type*
1847 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1848 {
1849 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1850 if (l!=0)
1851 {
1852 size_t offset=0;
1853 if (bufferg==NULL)
1854 {
1855 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1856 }
1857 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1858 return &((*res)[offset]);
1859 }
1860 return getReady()->getSampleDataRO(sampleNo);
1861 }
1862
1863
1864
1865 /**
1866 Modify a filename for MPI parallel output to multiple files
1867 */
1868 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1869
1870 /**
1871 Binary Data object operators.
1872 */
1873 inline double rpow(double x,double y)
1874 {
1875 return pow(y,x);
1876 }
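// Worked example (added note, not from the original source): rpow(3.0, 2.0) == pow(2.0, 3.0) == 8.0.
// The arguments are deliberately swapped so rpow can serve as a "reversed pow" binary functor.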
1877
1878 /**
1879 \brief
1880 Operator+
1881 Takes two Data objects.
1882 */
1883 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1884
1885 /**
1886 \brief
1887 Operator-
1888 Takes two Data objects.
1889 */
1890 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1891
1892 /**
1893 \brief
1894 Operator*
1895 Takes two Data objects.
1896 */
1897 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1898
1899 /**
1900 \brief
1901 Operator/
1902 Takes two Data objects.
1903 */
1904 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1905
1906 /**
1907 \brief
1908 Operator+
1909 Takes LHS Data object and RHS python::object.
1910 python::object must be convertible to Data type.
1911 */
1912 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1913
1914 /**
1915 \brief
1916 Operator-
1917 Takes LHS Data object and RHS python::object.
1918 python::object must be convertible to Data type.
1919 */
1920 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1921
1922 /**
1923 \brief
1924 Operator*
1925 Takes LHS Data object and RHS python::object.
1926 python::object must be convertible to Data type.
1927 */
1928 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1929
1930 /**
1931 \brief
1932 Operator/
1933 Takes LHS Data object and RHS python::object.
1934 python::object must be convertible to Data type.
1935 */
1936 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1937
1938 /**
1939 \brief
1940 Operator+
1941 Takes LHS python::object and RHS Data object.
1942 python::object must be convertible to Data type.
1943 */
1944 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1945
1946 /**
1947 \brief
1948 Operator-
1949 Takes LHS python::object and RHS Data object.
1950 python::object must be convertible to Data type.
1951 */
1952 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1953
1954 /**
1955 \brief
1956 Operator*
1957 Takes LHS python::object and RHS Data object.
1958 python::object must be convertible to Data type.
1959 */
1960 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1961
1962 /**
1963 \brief
1964 Operator/
1965 Takes LHS python::object and RHS Data object.
1966 python::object must be convertible to Data type.
1967 */
1968 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1969
1970
1971
1972 /**
1973 \brief
1974 Output operator
1975 */
1976 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1977
1978 /**
1979 \brief
1980 Compute a tensor product of two Data objects
1981 \param arg_0 - Input - Data object
1982 \param arg_1 - Input - Data object
1983 \param axis_offset - Input - axis offset
1984 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1985 */
1986 ESCRIPT_DLL_API
1987 Data
1988 C_GeneralTensorProduct(Data& arg_0,
1989 Data& arg_1,
1990 int axis_offset=0,
1991 int transpose=0);
1992
1993 /**
1994 \brief
1995 Perform the given binary operation with this and right as operands.
1996 Right is a Data object.
1997 */
1998 template <class BinaryFunction>
1999 inline
2000 void
2001 Data::binaryOp(const Data& right,
2002 BinaryFunction operation)
2003 {
2004 //
2005 // a rank zero object cannot be updated with a higher-rank RHS (no promotion is performed)
2006 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2007 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2008 }
2009
2010 if (isLazy() || right.isLazy())
2011 {
2012 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2013 }
2014 //
2015 // initially make the temporary a shallow copy
2016 Data tempRight(right);
2017
2018 if (getFunctionSpace()!=right.getFunctionSpace()) {
2019 if (right.probeInterpolation(getFunctionSpace())) {
2020 //
2021 // an interpolation is required so create a new Data
2022 tempRight=Data(right,this->getFunctionSpace());
2023 } else if (probeInterpolation(right.getFunctionSpace())) {
2024 //
2025 // interpolate onto the RHS function space
2026 Data tempLeft(*this,right.getFunctionSpace());
2027 // m_data=tempLeft.m_data;
2028 set_m_data(tempLeft.m_data);
2029 }
2030 }
2031 operandCheck(tempRight);
2032 //
2033 // ensure this has the right type for the RHS
2034 typeMatchRight(tempRight);
2035 //
2036 // Need to cast to the concrete types so that the correct binaryOp
2037 // is called.
2038 if (isExpanded()) {
2039 //
2040 // Expanded data will be done in parallel, the right hand side can be
2041 // of any data type
2042 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2043 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2044 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2045 } else if (isTagged()) {
2046 //
2047 // Tagged data is operated on serially, the right hand side can be
2048 // either DataConstant or DataTagged
2049 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2050 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2051 if (right.isTagged()) {
2052 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2053 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2054 escript::binaryOp(*leftC,*rightC,operation);
2055 } else {
2056 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2057 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2058 escript::binaryOp(*leftC,*rightC,operation);
2059 }
2060 } else if (isConstant()) {
2061 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2062 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2063 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2064 escript::binaryOp(*leftC,*rightC,operation);
2065 }
2066 }
2067
2068 /**
2069 \brief
2070 Perform the given Data object reduction algorithm on this and return the result.
2071 Given operation combines each element of each data point, thus argument
2072 object (*this) is a rank n Data object, and returned object is a scalar.
2073 Calls escript::algorithm.
2074 */
2075 template <class BinaryFunction>
2076 inline
2077 double
2078 Data::algorithm(BinaryFunction operation, double initial_value) const
2079 {
2080 if (isExpanded()) {
2081 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2082 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2083 return escript::algorithm(*leftC,operation,initial_value);
2084 } else if (isTagged()) {
2085 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2086 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2087 return escript::algorithm(*leftC,operation,initial_value);
2088 } else if (isConstant()) {
2089 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2090 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2091 return escript::algorithm(*leftC,operation,initial_value);
2092 } else if (isEmpty()) {
2093 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2094 } else if (isLazy()) {
2095 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2096 } else {
2097 throw DataException("Error - Data encapsulates an unknown type.");
2098 }
2099 }
2100
2101 /**
2102 \brief
2103 Perform the given data point reduction algorithm on data and return the result.
2104 Given operation combines each element within each data point into a scalar,
2105 thus argument object is a rank n Data object, and returned object is a
2106 rank 0 Data object.
2107 Calls escript::dp_algorithm.
2108 */
2109 template <class BinaryFunction>
2110 inline
2111 Data
2112 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2113 {
2114 if (isEmpty()) {
2115 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2116 }
2117 else if (isExpanded()) {
2118 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2119 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2120 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2121 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2122 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2123 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2124 return result;
2125 }
2126 else if (isTagged()) {
2127 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2128 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2129 DataTypes::ValueType defval(1);
2130 defval[0]=0;
2131 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2132 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2133 return Data(resultT); // note: the Data object now owns the resultT pointer
2134 }
2135 else if (isConstant()) {
2136 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2137 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2138 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2139 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2140 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2141 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2142 return result;
2143 } else if (isLazy()) {
2144 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2145 } else {
2146 throw DataException("Error - Data encapsulates an unknown type.");
2147 }
2148 }
2149
2150 /**
2151 \brief
2152 Compute a tensor operation with two Data objects
2153 \param arg_0 - Input - Data object
2154 \param arg_1 - Input - Data object
2155 \param operation - Input - Binary op functor
2156 */
2157 template <typename BinaryFunction>
2158 inline
2159 Data
2160 C_TensorBinaryOperation(Data const &arg_0,
2161 Data const &arg_1,
2162 BinaryFunction operation)
2163 {
2164 if (arg_0.isEmpty() || arg_1.isEmpty())
2165 {
2166 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2167 }
2168 if (arg_0.isLazy() || arg_1.isLazy())
2169 {
2170 throw DataException("Error - Operations not permitted on lazy data.");
2171 }
2172 // Interpolate if necessary and find an appropriate function space
2173 Data arg_0_Z, arg_1_Z;
2174 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2175 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2176 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2177 arg_1_Z = Data(arg_1);
2178 }
2179 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2180 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2181 arg_0_Z =Data(arg_0);
2182 }
2183 else {
2184 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2185 }
2186 } else {
2187 arg_0_Z = Data(arg_0);
2188 arg_1_Z = Data(arg_1);
2189 }
2190 // Get rank and shape of inputs
2191 int rank0 = arg_0_Z.getDataPointRank();
2192 int rank1 = arg_1_Z.getDataPointRank();
2193 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2194 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2195 int size0 = arg_0_Z.getDataPointSize();
2196 int size1 = arg_1_Z.getDataPointSize();
2197 // Declare output Data object
2198 Data res;
2199
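// The cases below dispatch on the data point shapes: identical shapes are
// combined component-wise; if one argument has rank 0, its single value is
// broadcast against every component of the other; any other combination of
// shapes raises a DataException.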
2200 if (shape0 == shape1) {
2201 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2202 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2203 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2204 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2205 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2206
2207 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2208 }
2209 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2210
2211 // Prepare the DataConstant input
2212 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2213
2214 // Borrow DataTagged input from Data object
2215 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2216
2217 // Prepare a DataTagged output (tmp_2)
2218 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2219 res.tag();
2220 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2221
2222 // Prepare offset into DataConstant
2223 int offset_0 = tmp_0->getPointOffset(0,0);
2224 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2225
2226 // Get the pointers to the actual data
2227 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2228 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2229
2230 // Compute a result for the default
2231 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2232 // Compute a result for each tag
2233 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2234 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2235 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2236 tmp_2->addTag(i->first);
2237 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2238 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2239
2240 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2241 }
2242
2243 }
2244 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2245 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2246 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2247 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2248 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2249
2250 int sampleNo_1,dataPointNo_1;
2251 int numSamples_1 = arg_1_Z.getNumSamples();
2252 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2253 int offset_0 = tmp_0->getPointOffset(0,0);
2254 res.requireWrite();
2255 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2256 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2257 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2258 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2259 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2260 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2261 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2262 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2263 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2264 }
2265 }
2266
2267 }
2268 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2269 // Borrow DataTagged input from Data object
2270 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2271
2272 // Prepare the DataConstant input
2273 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2274
2275 // Prepare a DataTagged output (tmp_2)
2276 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2277 res.tag();
2278 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2279
2280 // Prepare offset into DataConstant
2281 int offset_1 = tmp_1->getPointOffset(0,0);
2282
2283 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2284 // Get the pointers to the actual data
2285 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2286 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2287 // Compute a result for the default
2288 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2289 // Compute a result for each tag
2290 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2291 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2292 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2293 tmp_2->addTag(i->first);
2294 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2295 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2296 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2297 }
2298
2299 }
2300 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2301 // Borrow DataTagged input from Data object
2302 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2303
2304 // Borrow DataTagged input from Data object
2305 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2306
2307 // Prepare a DataTagged output (tmp_2)
2308 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2309 res.tag(); // DataTagged output
2310 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2311
2312 // Get the pointers to the actual data
2313 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2314 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2315 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2316
2317 // Compute a result for the default
2318 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2319 // Merge the tags
2320 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2321 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2322 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2323 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2324 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2325 }
2326 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2327 tmp_2->addTag(i->first);
2328 }
2329 // Compute a result for each tag
2330 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2331 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2332
2333 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2334 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2335 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2336
2337 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2338 }
2339
2340 }
2341 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2342 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2343 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2344 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2345 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2346 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2347
2348 int sampleNo_0,dataPointNo_0;
2349 int numSamples_0 = arg_0_Z.getNumSamples();
2350 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2351 res.requireWrite();
2352 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2353 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2354 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2355 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2356 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2357 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2358 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2359 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2360 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2361 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2362 }
2363 }
2364
2365 }
2366 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2367 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2368 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2369 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2370 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2371
2372 int sampleNo_0,dataPointNo_0;
2373 int numSamples_0 = arg_0_Z.getNumSamples();
2374 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2375 int offset_1 = tmp_1->getPointOffset(0,0);
2376 res.requireWrite();
2377 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2378 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2379 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2380 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2381 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2382
2383 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2384 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2385 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2386
2387
2388 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2389 }
2390 }
2391
2392 }
2393 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2394 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2395 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2396 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2397 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2398 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2399
2400 int sampleNo_0,dataPointNo_0;
2401 int numSamples_0 = arg_0_Z.getNumSamples();
2402 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2403 res.requireWrite();
2404 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2405 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2406 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2407 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2408 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2409 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2410 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2411 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2412 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2413 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2414 }
2415 }
2416
2417 }
2418 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2419 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2420 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2421 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2422 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2423 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2424
2425 int sampleNo_0,dataPointNo_0;
2426 int numSamples_0 = arg_0_Z.getNumSamples();
2427 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2428 res.requireWrite();
2429 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2430 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2431 dataPointNo_0=0;
2432 // The inner per-point loop is collapsed: data points within a sample are stored contiguously, so a single call below covers the whole sample.
2433 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2434 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2435 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2436 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2437 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2438 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2439 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2440
2441 }
2442
2443 }
2444 else {
2445 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2446 }
2447
2448 } else if (0 == rank0) {
2449 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2450 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2451 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2452 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2453 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2454 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2455 }
2456 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2457
2458 // Prepare the DataConstant input
2459 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2460
2461 // Borrow DataTagged input from Data object
2462 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2463
2464 // Prepare a DataTagged output (tmp_2)
2465 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2466 res.tag();
2467 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2468
2469 // Prepare offset into DataConstant
2470 int offset_0 = tmp_0->getPointOffset(0,0);
2471 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2472
2473 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2474 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2475
2476 // Compute a result for the default
2477 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2478 // Compute a result for each tag
2479 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2480 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2481 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2482 tmp_2->addTag(i->first);
2483 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2484 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2485 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2486 }
2487
2488 }
2489 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2490
2491 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2492 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2493 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2494 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2495
2496 int sampleNo_1,dataPointNo_1;
2497 int numSamples_1 = arg_1_Z.getNumSamples();
2498 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2499 int offset_0 = tmp_0->getPointOffset(0,0);
2500 res.requireWrite();
2501 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2502 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2503 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2504 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2505 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2506 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2507 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2508 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2509 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2510
2511 }
2512 }
2513
2514 }
2515 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2516
2517 // Borrow DataTagged input from Data object
2518 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2519
2520 // Prepare the DataConstant input
2521 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2522
2523 // Prepare a DataTagged output (tmp_2)
2524 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2525 res.tag();
2526 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2527
2528 // Prepare offset into DataConstant
2529 int offset_1 = tmp_1->getPointOffset(0,0);
2530 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2531
2532 // Get the pointers to the actual data
2533 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2534 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2535
2536
2537 // Compute a result for the default
2538 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2539 // Compute a result for each tag
2540 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2541 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2542 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2543 tmp_2->addTag(i->first);
2544 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2545 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2546
2547 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2548 }
2549
2550 }
2551 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2552
2553 // Borrow DataTagged input from Data object
2554 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2555
2556 // Borrow DataTagged input from Data object
2557 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2558
2559 // Prepare a DataTagged output (tmp_2)
2560 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2561 res.tag(); // DataTagged output
2562 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2563
2564 // Get the pointers to the actual data
2565 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2566 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2567 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2568
2569 // Compute a result for the default
2570 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2571 // Merge the tags
2572 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2573 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2574 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2575 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2576 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2577 }
2578 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2579 tmp_2->addTag(i->first);
2580 }
2581 // Compute a result for each tag
2582 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2583 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2584 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2585 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2586 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2587
2588 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2589 }
2590
2591 }
2592 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2593
2594 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2595 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2596 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2597 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2598 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2599
2600 int sampleNo_0,dataPointNo_0;
2601 int numSamples_0 = arg_0_Z.getNumSamples();
2602 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2603 res.requireWrite();
2604 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2605 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2606 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2607 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2608 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2609 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2610 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2611 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2612 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2613 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2614 }
2615 }
2616
2617 }
2618 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2619 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2620 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2621 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2622 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2623
2624 int sampleNo_0,dataPointNo_0;
2625 int numSamples_0 = arg_0_Z.getNumSamples();
2626 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2627 int offset_1 = tmp_1->getPointOffset(0,0);
2628 res.requireWrite();
2629 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2630 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2631 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2632 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2633 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2634 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2635 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2636 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2637 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2638 }
2639 }
2640
2641
2642 }
2643 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2644
2645 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2646 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2647 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2648 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2649 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2650
2651 int sampleNo_0,dataPointNo_0;
2652 int numSamples_0 = arg_0_Z.getNumSamples();
2653 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2654 res.requireWrite();
2655 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2656 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2657 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2658 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2659 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2660 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2661 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2662 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2663 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2664 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2665 }
2666 }
2667
2668 }
2669 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2670
2671 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2672 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2673 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2674 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2675 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2676
2677 int sampleNo_0,dataPointNo_0;
2678 int numSamples_0 = arg_0_Z.getNumSamples();
2679 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2680 res.requireWrite();
2681 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2682 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2683 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2684 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2685 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2686 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2687 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2688 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2689 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2690 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2691 }
2692 }
2693
2694 }
2695 else {
2696 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2697 }
2698
2699 } else if (0 == rank1) {
2700 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2701 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2702 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2703 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2704 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2705 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2706 }
2707 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2708
2709 // Prepare the DataConstant input
2710 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2711
2712 // Borrow DataTagged input from Data object
2713 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2714
2715 // Prepare a DataTagged output (tmp_2)
2716 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2717 res.tag();
2718 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2719
2720 // Prepare offset into DataConstant
2721 int offset_0 = tmp_0->getPointOffset(0,0);
2722 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2723
2724 // Get the pointers to the actual data
2725 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2726 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2727
2728 // Compute a result for the default
2729 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2730 // Compute a result for each tag
2731 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2732 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2733 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2734 tmp_2->addTag(i->first);
2735 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2736 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2737 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2738 }
2739 }
2740 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2741
2742 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2743 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2744 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2745 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2746
2747 int sampleNo_1,dataPointNo_1;
2748 int numSamples_1 = arg_1_Z.getNumSamples();
2749 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2750 int offset_0 = tmp_0->getPointOffset(0,0);
2751 res.requireWrite();
2752 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2753 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2754 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2755 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2756 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2757 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2758 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2759 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2760 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2761 }
2762 }
2763
2764 }
2765 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2766
2767 // Borrow DataTagged input from Data object
2768 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2769
2770 // Prepare the DataConstant input
2771 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2772
2773 // Prepare a DataTagged output (tmp_2)
2774 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2775 res.tag();
2776 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2777
2778 // Prepare offset into DataConstant
2779 int offset_1 = tmp_1->getPointOffset(0,0);
2780 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2781 // Get the pointers to the actual data
2782 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2783 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2784 // Compute a result for the default
2785 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2786 // Compute a result for each tag
2787 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2788 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2789 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2790 tmp_2->addTag(i->first);
2791 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2792 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2793 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2794 }
2795
2796 }
2797 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2798
2799 // Borrow DataTagged input from Data object
2800 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2801
2802 // Borrow DataTagged input from Data object
2803 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2804
2805 // Prepare a DataTagged output (tmp_2)
2806 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2807 res.tag(); // DataTagged output
2808 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2809
2810 // Get the pointers to the actual data
2811 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2812 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2813 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2814
2815 // Compute a result for the default
2816 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2817 // Merge the tags
2818 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2819 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2820 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2821 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2822 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2823 }
2824 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2825 tmp_2->addTag(i->first);
2826 }
2827 // Compute a result for each tag
2828 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2829 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2830 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2831 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2832 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2833 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2834 }
2835
2836 }
2837 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2838
2839 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2840 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2841 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2842 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2843 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2844
2845 int sampleNo_0,dataPointNo_0;
2846 int numSamples_0 = arg_0_Z.getNumSamples();
2847 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2848 res.requireWrite();
2849 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2850 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2851 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2852 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2853 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2854 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2855 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2856 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2857 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2858 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2859 }
2860 }
2861
2862 }
2863 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2864 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2865 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2866 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2867 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2868
2869 int sampleNo_0,dataPointNo_0;
2870 int numSamples_0 = arg_0_Z.getNumSamples();
2871 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2872 int offset_1 = tmp_1->getPointOffset(0,0);
2873 res.requireWrite();
2874 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2875 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2876 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2877 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2878 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2879 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2880 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2881 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2882 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2883 }
2884 }
2885
2886
2887 }
2888 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2889
2890 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2891 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2892 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2893 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2894 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2895
2896 int sampleNo_0,dataPointNo_0;
2897 int numSamples_0 = arg_0_Z.getNumSamples();
2898 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2899 res.requireWrite();
2900 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2901 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2902 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2903 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2904 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2905 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2906 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2907 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2908 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2909 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2910 }
2911 }
2912
2913 }
2914 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2915
2916 // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2917 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2918 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2919 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2920 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2921
2922 int sampleNo_0,dataPointNo_0;
2923 int numSamples_0 = arg_0_Z.getNumSamples();
2924 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2925 res.requireWrite();
2926 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2927 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2928 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2929 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2930 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2931 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2932 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2933 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2934 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2935 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2936 }
2937 }
2938
2939 }
2940 else {
2941 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2942 }
2943
2944 } else {
2945 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2946 }
2947
2948 return res;
2949 }
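// Usage sketch (illustrative only, not part of the header): `a` and `b` are assumed
// to be escript::Data objects whose shapes and function spaces are compatible in the
// sense checked above.
//
//   #include <functional>
//
//   Data sum     = C_TensorBinaryOperation(a, b, std::plus<double>());
//   Data product = C_TensorBinaryOperation(a, b, std::multiplies<double>());
//
// Any functor or function pointer with the signature double(double, double) may be
// supplied as the operation argument.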
2950
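/**
\brief
Compute a tensor operation applied pointwise to a single Data object
\param arg_0 - Input - Data operand
\param operation - Input - unary functor applied to each value of each data point
*/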
2951 template <typename UnaryFunction>
2952 Data
2953 C_TensorUnaryOperation(Data const &arg_0,
2954 UnaryFunction operation)
2955 {
2956 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2957 {
2958 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2959 }
2960 if (arg_0.isLazy())
2961 {
2962 throw DataException("Error - Operations not permitted on lazy data.");
2963 }
2964 // Work on a copy of the argument; no interpolation is needed for a single argument
2965 Data arg_0_Z = Data(arg_0);
2966
2967 // Get rank and shape of inputs
2968 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2969 int size0 = arg_0_Z.getDataPointSize();
2970
2971 // Declare output Data object
2972 Data res;
2973
2974 if (arg_0_Z.isConstant()) {
2975 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2976 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2977 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2978 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2979 }
2980 else if (arg_0_Z.isTagged()) {
2981
2982 // Borrow DataTagged input from Data object
2983 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2984
2985 // Prepare a DataTagged output (tmp_2)
2986 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2987 res.tag();
2988 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2989
2990 // Get the pointers to the actual data
2991 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2992 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2993 // Compute a result for the default
2994 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2995 // Compute a result for each tag
2996 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2997 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2998 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2999 tmp_2->addTag(i->first);
3000 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3001 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3002 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3003 }
3004
3005 }
3006 else if (arg_0_Z.isExpanded()) {
3007
3008 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3009 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3010 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3011
3012 int sampleNo_0,dataPointNo_0;
3013 int numSamples_0 = arg_0_Z.getNumSamples();
3014 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3015 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3016 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3017 dataPointNo_0=0;
3018 // The inner per-point loop is collapsed: data points within a sample are stored contiguously, so a single call below covers the whole sample.
3019 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3020 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3021 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3022 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3023 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3024
3025 }
3026 }
3027 else {
3028 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3029 }
3030
3031 return res;
3032 }
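// Usage sketch (illustrative only, not part of the header): `a` is assumed to be an
// escript::Data object.
//
//   #include <functional>
//
//   Data negated = C_TensorUnaryOperation(a, std::negate<double>());
//
// Any functor or function pointer with the signature double(double) may be supplied
// as the operation argument.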
3033
3034 }
3035 #endif
