Contents of /trunk/escript/src/Data.h

Revision 2723
Sun Oct 18 23:44:37 2009 UTC by jfenwick
File MIME type: text/plain
File size: 92652 byte(s)
Use correct types for MPI op parameters

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2009 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (e.g. getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
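
A minimal usage sketch (illustrative only; 'fs' stands for a FunctionSpace
obtained from a domain elsewhere and is not defined in this header):
\code
escript::Data a(1.0, escript::DataTypes::ShapeType(), fs);        // constant scalar
escript::Data b(2.0, escript::DataTypes::ShapeType(), fs, true);  // expanded scalar
escript::Data c = a + b;      // element-wise addition
double m = c.Lsup();          // largest absolute value over all data points
\endcode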
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData to "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
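
A hedged usage sketch ('d' is a hypothetical existing Data object):
\code
escript::Data lazyCopy = d.delay();   // wrap the data in a lazy node
// ... build further operations on lazyCopy ...
lazyCopy.resolve();                   // force evaluation back to ready data
\endcode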
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true if the data object is protected against updates
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for LazyData.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - Input - group from which a buffer to compute (and store) the sample data will be selected.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
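/* A hedged write-access sketch for the methods above ('d' is a hypothetical,
   non-lazy or resolvable Data object):

     d.requireWrite();                        // single-threaded: resolve and un-share
     #pragma omp parallel for
     for (int s = 0; s < d.getNumSamples(); ++s)
     {
         DataAbstract::ValueType::value_type* p = d.getSampleDataRW(s);
         // ... modify the values for sample s in place ...
     }
*/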
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687 /**
688 \brief Return true if this object contains no samples.
689 This is not the same as isEmpty()
690 */
691 ESCRIPT_DLL_API
692 bool
693 hasNoSamples() const
694 {
695 return getLength()==0;
696 }
697
698 /**
699 \brief
700 Assign the given value to the tag associated with name. Implicitly converts this
701 object to type DataTagged. Throws an exception if this object
702 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
703 \param name - Input - name of tag.
704 \param value - Input - Value to associate with given key.
705 */
706 ESCRIPT_DLL_API
707 void
708 setTaggedValueByName(std::string name,
709 const boost::python::object& value);
710
711 /**
712 \brief
713 Assign the given value to the tag. Implicitly converts this
714 object to type DataTagged if it is constant.
715
716 \param tagKey - Input - Integer key.
717 \param value - Input - Value to associate with given key.
718
719 */
720 ESCRIPT_DLL_API
721 void
722 setTaggedValue(int tagKey,
723 const boost::python::object& value);
724
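/* A hedged tagging sketch (tag keys/names are defined by the domain; the
   Python interpreter must be initialised to build boost::python::object
   values; 'fs' is a hypothetical FunctionSpace):

     escript::Data d(0.0, escript::DataTypes::ShapeType(), fs);
     d.tag();                                          // DataConstant -> DataTagged
     d.setTaggedValue(1, boost::python::object(5.0));  // value used for tag key 1
*/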
725 /**
726 \brief
727 Assign the given value to the tag. Implicitly converts this
728 object to type DataTagged if it is constant.
729
730 \param tagKey - Input - Integer key.
731 \param pointshape - Input - The shape of the value parameter
732 \param value - Input - Value to associate with given key.
733 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
734 */
735 ESCRIPT_DLL_API
736 void
737 setTaggedValueFromCPP(int tagKey,
738 const DataTypes::ShapeType& pointshape,
739 const DataTypes::ValueType& value,
740 int dataOffset=0);
741
742
743
744 /**
745 \brief
746 Copy other Data object into this Data object where mask is positive.
747 */
748 ESCRIPT_DLL_API
749 void
750 copyWithMask(const Data& other,
751 const Data& mask);
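/* A hedged sketch: replace the values of 'd' by those of 'other' wherever the
   mask is positive ('d' and 'other' are hypothetical, compatible Data objects):

     escript::Data mask = d.wherePositive();   // 1 where d > 0, 0 elsewhere
     d.copyWithMask(other, mask);
*/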
752
753 /**
754 Data object operation methods and operators.
755 */
756
757 /**
758 \brief
759 set all values to zero
760 *
761 */
762 ESCRIPT_DLL_API
763 void
764 setToZero();
765
766 /**
767 \brief
768 Interpolates this onto the given functionspace and returns
769 the result as a Data object.
770 *
771 */
772 ESCRIPT_DLL_API
773 Data
774 interpolate(const FunctionSpace& functionspace) const;
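/* A hedged sketch ('d' is a hypothetical Data object, 'fs2' a hypothetical
   target FunctionSpace):

     if (d.probeInterpolation(fs2))              // declared further below
     {
         escript::Data d2 = d.interpolate(fs2);  // result lives on fs2
     }
*/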
775
776
777 ESCRIPT_DLL_API
778 Data
779 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
780 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
781
782 ESCRIPT_DLL_API
783 Data
784 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
785 double undef,bool check_boundaries);
786
787
788
789
790 ESCRIPT_DLL_API
791 Data
792 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
793 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
794
795 ESCRIPT_DLL_API
796 Data
797 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
798 double undef,bool check_boundaries);
799
800 /**
801 \brief
802 Calculates the gradient of the data at the data points of functionspace.
803 If functionspace is not present the function space of Function(getDomain()) is used.
804 *
805 */
806 ESCRIPT_DLL_API
807 Data
808 gradOn(const FunctionSpace& functionspace) const;
809
810 ESCRIPT_DLL_API
811 Data
812 grad() const;
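/* A hedged sketch ('u' is a hypothetical Data object on a suitable domain):

     escript::Data g  = u.grad();           // gradient on Function(getDomain())
     escript::Data g2 = u.gradOn(fs_grad);  // 'fs_grad' is a hypothetical target space
*/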
813
814 /**
815 \brief
816 Calculate the integral over the function space domain as a python tuple.
817 */
818 ESCRIPT_DLL_API
819 boost::python::object
820 integrateToTuple_const() const;
821
822
823 /**
824 \brief
825 Calculate the integral over the function space domain as a python tuple.
826 */
827 ESCRIPT_DLL_API
828 boost::python::object
829 integrateToTuple();
830
831
832
833 /**
834 \brief
835 Return the reciprocal (1./) of each data point of this Data object.
836 *
837 */
838 ESCRIPT_DLL_API
839 Data
840 oneOver() const;
841 /**
842 \brief
843 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
844 *
845 */
846 ESCRIPT_DLL_API
847 Data
848 wherePositive() const;
849
850 /**
851 \brief
852 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
853 *
854 */
855 ESCRIPT_DLL_API
856 Data
857 whereNegative() const;
858
859 /**
860 \brief
861 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
862 *
863 */
864 ESCRIPT_DLL_API
865 Data
866 whereNonNegative() const;
867
868 /**
869 \brief
870 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
871 *
872 */
873 ESCRIPT_DLL_API
874 Data
875 whereNonPositive() const;
876
877 /**
878 \brief
879 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
880 *
881 */
882 ESCRIPT_DLL_API
883 Data
884 whereZero(double tol=0.0) const;
885
886 /**
887 \brief
888 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
889 *
890 */
891 ESCRIPT_DLL_API
892 Data
893 whereNonZero(double tol=0.0) const;
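/* A hedged sketch combining the where* methods above ('d' hypothetical):

     escript::Data nearZero  = d.whereZero(1.e-10);     // 1 where the value is (numerically) zero, assuming |value| <= tol
     escript::Data elsewhere = d.whereNonZero(1.e-10);  // the complement
*/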
894
895 /**
896 \brief
897 Return the maximum absolute value of this Data object.
898
899 The method is not const because lazy data needs to be expanded before Lsup can be computed.
900 The _const form can be used when the Data object is const, however this will only work for
901 Data which is not Lazy.
902
903 For Data which contain no samples (or tagged Data for which no tags in use have a value)
904 zero is returned.
905 */
906 ESCRIPT_DLL_API
907 double
908 Lsup();
909
910 ESCRIPT_DLL_API
911 double
912 Lsup_const() const;
913
914
915 /**
916 \brief
917 Return the maximum value of this Data object.
918
919 The method is not const because lazy data needs to be expanded before sup can be computed.
920 The _const form can be used when the Data object is const, however this will only work for
921 Data which is not Lazy.
922
923 For Data which contain no samples (or tagged Data for which no tags in use have a value)
924 a large negative value is returned.
925 */
926 ESCRIPT_DLL_API
927 double
928 sup();
929
930 ESCRIPT_DLL_API
931 double
932 sup_const() const;
933
934
935 /**
936 \brief
937 Return the minimum value of this Data object.
938
939 The method is not const because lazy data needs to be expanded before inf can be computed.
940 The _const form can be used when the Data object is const, however this will only work for
941 Data which is not Lazy.
942
943 For Data which contain no samples (or tagged Data for which no tags in use have a value)
944 a large positive value is returned.
945 */
946 ESCRIPT_DLL_API
947 double
948 inf();
949
950 ESCRIPT_DLL_API
951 double
952 inf_const() const;
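/* A hedged sketch of the global reductions above ('d' hypothetical):

     double largestAbs = d.Lsup();   // non-const: may resolve lazy data first
     double largest    = d.sup();
     double smallest   = d.inf();
*/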
953
954
955
956 /**
957 \brief
958 Return the absolute value of each data point of this Data object.
959 *
960 */
961 ESCRIPT_DLL_API
962 Data
963 abs() const;
964
965 /**
966 \brief
967 Return the maximum value of each data point of this Data object.
968 *
969 */
970 ESCRIPT_DLL_API
971 Data
972 maxval() const;
973
974 /**
975 \brief
976 Return the minimum value of each data point of this Data object.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 minval() const;
982
983 /**
984 \brief
985 Return the (sample number, data-point number) of the data point with
986 the minimum component value in this Data object.
987 \note If you are working in python, please consider using Locator
988 instead of manually manipulating process and point IDs.
989 */
990 ESCRIPT_DLL_API
991 const boost::python::tuple
992 minGlobalDataPoint() const;
993
994 /**
995 \brief
996 Return the (sample number, data-point number) of the data point with
997 the minimum component value in this Data object.
998 \note If you are working in python, please consider using Locator
999 instead of manually manipulating process and point IDs.
1000 */
1001 ESCRIPT_DLL_API
1002 const boost::python::tuple
1003 maxGlobalDataPoint() const;
1004
1005
1006
1007 /**
1008 \brief
1009 Return the sign of each data point of this Data object.
1010 -1 for negative values, zero for zero values, 1 for positive values.
1011 *
1012 */
1013 ESCRIPT_DLL_API
1014 Data
1015 sign() const;
1016
1017 /**
1018 \brief
1019 Return the symmetric part of a matrix, i.e. (the matrix plus its transpose)/2.
1020 *
1021 */
1022 ESCRIPT_DLL_API
1023 Data
1024 symmetric() const;
1025
1026 /**
1027 \brief
1028 Return the nonsymmetric (antisymmetric) part of a matrix, i.e. (the matrix minus its transpose)/2.
1029 *
1030 */
1031 ESCRIPT_DLL_API
1032 Data
1033 nonsymmetric() const;
1034
1035 /**
1036 \brief
1037 Return the trace of a matrix
1038 *
1039 */
1040 ESCRIPT_DLL_API
1041 Data
1042 trace(int axis_offset) const;
1043
1044 /**
1045 \brief
1046 Transpose each data point of this Data object around the given axis.
1047 *
1048 */
1049 ESCRIPT_DLL_API
1050 Data
1051 transpose(int axis_offset) const;
1052
1053 /**
1054 \brief
1055 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1056 Currently this function is restricted to rank 2, square shape, and dimension 3.
1057 *
1058 */
1059 ESCRIPT_DLL_API
1060 Data
1061 eigenvalues() const;
1062
1063 /**
1064 \brief
1065 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1066 The eigenvalues are ordered in increasing size; eigenvalues with relative difference less than
1067 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1068 first non-zero entry is positive.
1069 Currently this function is restricted to rank 2, square shape, and dimension 3.
1070 *
1071 */
1072 ESCRIPT_DLL_API
1073 const boost::python::tuple
1074 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1075
1076 /**
1077 \brief
1078 swaps the components axis0 and axis1
1079 *
1080 */
1081 ESCRIPT_DLL_API
1082 Data
1083 swapaxes(const int axis0, const int axis1) const;
1084
1085 /**
1086 \brief
1087 Return the error function erf of each data point of this Data object.
1088 *
1089 */
1090 ESCRIPT_DLL_API
1091 Data
1092 erf() const;
1093
1094 /**
1095 \brief
1096 Return the sin of each data point of this Data object.
1097 *
1098 */
1099 ESCRIPT_DLL_API
1100 Data
1101 sin() const;
1102
1103 /**
1104 \brief
1105 Return the cos of each data point of this Data object.
1106 *
1107 */
1108 ESCRIPT_DLL_API
1109 Data
1110 cos() const;
1111
1112 /**
1113 \brief
1114 Return the tan of each data point of this Data object.
1115 *
1116 */
1117 ESCRIPT_DLL_API
1118 Data
1119 tan() const;
1120
1121 /**
1122 \brief
1123 Return the asin of each data point of this Data object.
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 Data
1128 asin() const;
1129
1130 /**
1131 \brief
1132 Return the acos of each data point of this Data object.
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 acos() const;
1138
1139 /**
1140 \brief
1141 Return the atan of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 atan() const;
1147
1148 /**
1149 \brief
1150 Return the sinh of each data point of this Data object.
1151 *
1152 */
1153 ESCRIPT_DLL_API
1154 Data
1155 sinh() const;
1156
1157 /**
1158 \brief
1159 Return the cosh of each data point of this Data object.
1160 *
1161 */
1162 ESCRIPT_DLL_API
1163 Data
1164 cosh() const;
1165
1166 /**
1167 \brief
1168 Return the tanh of each data point of this Data object.
1169 *
1170 */
1171 ESCRIPT_DLL_API
1172 Data
1173 tanh() const;
1174
1175 /**
1176 \brief
1177 Return the asinh of each data point of this Data object.
1178 *
1179 */
1180 ESCRIPT_DLL_API
1181 Data
1182 asinh() const;
1183
1184 /**
1185 \brief
1186 Return the acosh of each data point of this Data object.
1187 *
1188 */
1189 ESCRIPT_DLL_API
1190 Data
1191 acosh() const;
1192
1193 /**
1194 \brief
1195 Return the atanh of each data point of this Data object.
1196 *
1197 */
1198 ESCRIPT_DLL_API
1199 Data
1200 atanh() const;
1201
1202 /**
1203 \brief
1204 Return the log to base 10 of each data point of this Data object.
1205 *
1206 */
1207 ESCRIPT_DLL_API
1208 Data
1209 log10() const;
1210
1211 /**
1212 \brief
1213 Return the natural log of each data point of this Data object.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 log() const;
1219
1220 /**
1221 \brief
1222 Return the exponential function of each data point of this Data object.
1223 *
1224 */
1225 ESCRIPT_DLL_API
1226 Data
1227 exp() const;
1228
1229 /**
1230 \brief
1231 Return the square root of each data point of this Data object.
1232 *
1233 */
1234 ESCRIPT_DLL_API
1235 Data
1236 sqrt() const;
1237
1238 /**
1239 \brief
1240 Return the negation of each data point of this Data object.
1241 *
1242 */
1243 ESCRIPT_DLL_API
1244 Data
1245 neg() const;
1246
1247 /**
1248 \brief
1249 Return the identity of each data point of this Data object.
1250 Simply returns this object unmodified.
1251 *
1252 */
1253 ESCRIPT_DLL_API
1254 Data
1255 pos() const;
1256
1257 /**
1258 \brief
1259 Return the given power of each data point of this Data object.
1260
1261 \param right Input - the power to raise the object to.
1262 *
1263 */
1264 ESCRIPT_DLL_API
1265 Data
1266 powD(const Data& right) const;
1267
1268 /**
1269 \brief
1270 Return each data point of this Data object raised to the power given by the python object.
1271
1272 \param right Input - the power to raise the object to.
1273 *
1274 */
1275 ESCRIPT_DLL_API
1276 Data
1277 powO(const boost::python::object& right) const;
1278
1279 /**
1280 \brief
1281 Return the given python object raised to the power of each data point of this Data object.
1282
1283 \param left Input - the bases
1284 *
1285 */
1286
1287 ESCRIPT_DLL_API
1288 Data
1289 rpowO(const boost::python::object& left) const;
1290
1291 /**
1292 \brief
1293 writes the object to a file in the DX file format
1294 */
1295 ESCRIPT_DLL_API
1296 void
1297 saveDX(std::string fileName) const;
1298
1299 /**
1300 \brief
1301 writes the object to a file in the VTK file format
1302 */
1303 ESCRIPT_DLL_API
1304 void
1305 saveVTK(std::string fileName) const;
1306
1307
1308
1309 /**
1310 \brief
1311 Overloaded operator +=
1312 \param right - Input - The right hand side.
1313 *
1314 */
1315 ESCRIPT_DLL_API
1316 Data& operator+=(const Data& right);
1317 ESCRIPT_DLL_API
1318 Data& operator+=(const boost::python::object& right);
1319
1320 ESCRIPT_DLL_API
1321 Data& operator=(const Data& other);
1322
1323 /**
1324 \brief
1325 Overloaded operator -=
1326 \param right - Input - The right hand side.
1327 *
1328 */
1329 ESCRIPT_DLL_API
1330 Data& operator-=(const Data& right);
1331 ESCRIPT_DLL_API
1332 Data& operator-=(const boost::python::object& right);
1333
1334 /**
1335 \brief
1336 Overloaded operator *=
1337 \param right - Input - The right hand side.
1338 *
1339 */
1340 ESCRIPT_DLL_API
1341 Data& operator*=(const Data& right);
1342 ESCRIPT_DLL_API
1343 Data& operator*=(const boost::python::object& right);
1344
1345 /**
1346 \brief
1347 Overloaded operator /=
1348 \param right - Input - The right hand side.
1349 *
1350 */
1351 ESCRIPT_DLL_API
1352 Data& operator/=(const Data& right);
1353 ESCRIPT_DLL_API
1354 Data& operator/=(const boost::python::object& right);
1355
1356 /**
1357 \brief
1358 Returns true if this can be interpolated to functionspace.
1359 */
1360 ESCRIPT_DLL_API
1361 bool
1362 probeInterpolation(const FunctionSpace& functionspace) const;
1363
1364 /**
1365 Data object slicing methods.
1366 */
1367
1368 /**
1369 \brief
1370 Returns a slice from this Data object.
1371
1372 Description:
1373 Implements the [] get operator in python.
1374 Calls getSlice.
1375
1376 \param key - Input - python slice tuple specifying
1377 slice to return.
1378 */
1379 ESCRIPT_DLL_API
1380 Data
1381 getItem(const boost::python::object& key) const;
1382
1383 /**
1384 \brief
1385 Copies slice from value into this Data object.
1386
1387 Implements the [] set operator in python.
1388 Calls setSlice.
1389
1390 \param key - Input - python slice tuple specifying
1391 slice to copy from value.
1392 \param value - Input - Data object to copy from.
1393 */
1394 ESCRIPT_DLL_API
1395 void
1396 setItemD(const boost::python::object& key,
1397 const Data& value);
1398
1399 ESCRIPT_DLL_API
1400 void
1401 setItemO(const boost::python::object& key,
1402 const boost::python::object& value);
1403
1404 // The following public methods should be treated as private.
1405
1406 /**
1407 \brief
1408 Perform the given unary operation on every element of every data point in
1409 this Data object.
1410 */
1411 template <class UnaryFunction>
1412 ESCRIPT_DLL_API
1413 inline
1414 void
1415 unaryOp2(UnaryFunction operation);
1416
1417 /**
1418 \brief
1419 Return a Data object containing the specified slice of
1420 this Data object.
1421 \param region - Input - Region to copy.
1422 *
1423 */
1424 ESCRIPT_DLL_API
1425 Data
1426 getSlice(const DataTypes::RegionType& region) const;
1427
1428 /**
1429 \brief
1430 Copy the specified slice from the given value into this
1431 Data object.
1432 \param value - Input - Data to copy from.
1433 \param region - Input - Region to copy.
1434 *
1435 */
1436 ESCRIPT_DLL_API
1437 void
1438 setSlice(const Data& value,
1439 const DataTypes::RegionType& region);
1440
1441 /**
1442 \brief
1443 print the data values to stdout. Used for debugging
1444 */
1445 ESCRIPT_DLL_API
1446 void
1447 print(void);
1448
1449 /**
1450 \brief
1451 return the MPI rank number of the local data
1452 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1453 is returned
1454 */
1455 ESCRIPT_DLL_API
1456 int
1457 get_MPIRank(void) const;
1458
1459 /**
1460 \brief
1461 return the number of MPI processes for the local data
1462 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1463 is returned
1464 */
1465 ESCRIPT_DLL_API
1466 int
1467 get_MPISize(void) const;
1468
1469 /**
1470 \brief
1471 return the MPI communicator of the local data
1472 MPI_COMM_WORLD is assumed and returned.
1473 */
1474 ESCRIPT_DLL_API
1475 MPI_Comm
1476 get_MPIComm(void) const;
1477
1478 /**
1479 \brief
1480 return the object produced by the factory, which is a DataConstant or DataExpanded
1481 TODO Ownership of this object should be explained in the documentation.
1482 */
1483 ESCRIPT_DLL_API
1484 DataAbstract*
1485 borrowData(void) const;
1486
1487 ESCRIPT_DLL_API
1488 DataAbstract_ptr
1489 borrowDataPtr(void) const;
1490
1491 ESCRIPT_DLL_API
1492 DataReady_ptr
1493 borrowReadyPtr(void) const;
1494
1495
1496
1497 /**
1498 \brief
1499 Return a pointer to the beginning of the datapoint at the specified offset.
1500 TODO Eventually these should be inlined.
1501 \param i - position(offset) in the underlying datastructure
1502 */
1503
1504 ESCRIPT_DLL_API
1505 DataTypes::ValueType::const_reference
1506 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1507
1508
1509 ESCRIPT_DLL_API
1510 DataTypes::ValueType::reference
1511 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1512
1513
1514
1515 /**
1516 \brief Create a buffer for use by getSample
1517 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1518 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1519
1520 In multi-threaded sections, this needs to be called on each thread.
1521
1522 \return A BufferGroup* if Data is lazy, NULL otherwise.
1523 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1524 */
1525 ESCRIPT_DLL_API
1526 BufferGroup*
1527 allocSampleBuffer() const;
1528
1529 /**
1530 \brief Free a buffer allocated with allocSampleBuffer.
1531 \param buffer Input - pointer to the buffer to deallocate.
1532 */
1533 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
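/* A hedged read-access sketch using the buffer methods above ('d' is a
   hypothetical Data object; allocSampleBuffer() is expected to return NULL
   for non-lazy Data, in which case the buffer argument is unused):

     BufferGroup* bg = d.allocSampleBuffer();
     for (int s = 0; s < d.getNumSamples(); ++s)
     {
         const DataAbstract::ValueType::value_type* p = d.getSampleDataRO(s, bg);
         // ... read the values for sample s ...
     }
     d.freeSampleBuffer(bg);
*/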
1534
1535 protected:
1536
1537 private:
1538
1539 template <class BinaryOp>
1540 double
1541 #ifdef PASO_MPI
1542 lazyAlgWorker(double init, MPI_Op mpiop_type);
1543 #else
1544 lazyAlgWorker(double init);
1545 #endif
1546
1547 double
1548 LsupWorker() const;
1549
1550 double
1551 supWorker() const;
1552
1553 double
1554 infWorker() const;
1555
1556 boost::python::object
1557 integrateWorker() const;
1558
1559 void
1560 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1561
1562 void
1563 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1564
1565
1566 /**
1567 \brief
1568 Check *this and the right operand are compatible. Throws
1569 an exception if they aren't.
1570 \param right - Input - The right hand side.
1571 */
1572 inline
1573 void
1574 operandCheck(const Data& right) const
1575 {
1576 return m_data->operandCheck(*(right.m_data.get()));
1577 }
1578
1579 /**
1580 \brief
1581 Perform the specified reduction algorithm on every element of every data point in
1582 this Data object according to the given function and return the single value result.
1583 */
1584 template <class BinaryFunction>
1585 inline
1586 double
1587 algorithm(BinaryFunction operation,
1588 double initial_value) const;
1589
1590 /**
1591 \brief
1592 Reduce each data-point in this Data object using the given operation. Return a Data
1593 object with the same number of data-points, but with each data-point containing only
1594 one value - the result of the reduction operation on the corresponding data-point in
1595 this Data object
1596 */
1597 template <class BinaryFunction>
1598 inline
1599 Data
1600 dp_algorithm(BinaryFunction operation,
1601 double initial_value) const;
1602
1603 /**
1604 \brief
1605 Perform the given binary operation on all of the data's elements.
1606 The underlying type of the right hand side (right) determines the final
1607 type of *this after the operation. For example if the right hand side
1608 is expanded *this will be expanded if necessary.
1609 RHS is a Data object.
1610 */
1611 template <class BinaryFunction>
1612 inline
1613 void
1614 binaryOp(const Data& right,
1615 BinaryFunction operation);
1616
1617 /**
1618 \brief
1619 Convert the data type of the RHS to match this.
1620 \param right - Input - data type to match.
1621 */
1622 void
1623 typeMatchLeft(Data& right) const;
1624
1625 /**
1626 \brief
1627 Convert the data type of this to match the RHS.
1628 \param right - Input - data type to match.
1629 */
1630 void
1631 typeMatchRight(const Data& right);
1632
1633 /**
1634 \brief
1635 Construct a Data object of the appropriate type.
1636 */
1637
1638 void
1639 initialise(const DataTypes::ValueType& value,
1640 const DataTypes::ShapeType& shape,
1641 const FunctionSpace& what,
1642 bool expanded);
1643
1644 void
1645 initialise(const WrappedArray& value,
1646 const FunctionSpace& what,
1647 bool expanded);
1648
1649 //
1650 // flag to protect the data object against any update
1651 bool m_protected;
1652 mutable bool m_shared;
1653 bool m_lazy;
1654
1655 //
1656 // pointer to the actual data object
1657 // boost::shared_ptr<DataAbstract> m_data;
1658 DataAbstract_ptr m_data;
1659
1660 // If possible please use getReadyPtr instead.
1661 // But see warning below.
1662 const DataReady*
1663 getReady() const;
1664
1665 DataReady*
1666 getReady();
1667
1668
1669 // Be wary of using this for local operations since it (temporarily) increases reference count.
1670 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1671 // getReady() instead
1672 DataReady_ptr
1673 getReadyPtr();
1674
1675 const_DataReady_ptr
1676 getReadyPtr() const;
1677
1678
1679 /**
1680 \brief Update the Data's shared flag
1681 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1682 For internal use only.
1683 */
1684 void updateShareStatus(bool nowshared) const
1685 {
1686 m_shared=nowshared; // m_shared is mutable
1687 }
1688
1689 // In the isShared() method below:
1690 // A problem would occur if m_data (the address pointed to) were being modified
1691 // while the call m_data->is_shared is being executed.
1692 //
1693 // Q: So why do I think this code can be thread safe/correct?
1694 // A: We need to make some assumptions.
1695 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1696 // 2. We assume that no constructions or assignments which will share previously unshared
1697 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1698 //
1699 // This means that the only transition we need to consider, is when a previously shared object is
1700 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1701 // In those cases the m_shared flag changes to false after m_data has completed changing.
1702 // For any threads executing before the flag switches they will assume the object is still shared.
1703 bool isShared() const
1704 {
1705 return m_shared;
1706 /* if (m_shared) return true;
1707 if (m_data->isShared())
1708 {
1709 updateShareStatus(true);
1710 return true;
1711 }
1712 return false;*/
1713 }
1714
1715 void forceResolve()
1716 {
1717 if (isLazy())
1718 {
1719 #ifdef _OPENMP
1720 if (omp_in_parallel())
1721 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1722 throw DataException("Please do not call forceResolve() in a parallel region.");
1723 }
1724 #endif
1725 resolve();
1726 }
1727 }
1728
1729 /**
1730 \brief If another object is sharing our member data, make a copy to work with instead.
1731 This code should only be called from single threaded sections of code.
1732 */
1733 void exclusiveWrite()
1734 {
1735 #ifdef _OPENMP
1736 if (omp_in_parallel())
1737 {
1738 // *((int*)0)=17;
1739 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1740 }
1741 #endif
1742 forceResolve();
1743 if (isShared())
1744 {
1745 DataAbstract* t=m_data->deepCopy();
1746 set_m_data(DataAbstract_ptr(t));
1747 }
1748 }
1749
1750 /**
1751 \brief checks if caller can have exclusive write to the object
1752 */
1753 void checkExclusiveWrite()
1754 {
1755 if (isLazy() || isShared())
1756 {
1757 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1758 }
1759 }
1760
1761 /**
1762 \brief Modify the data abstract hosted by this Data object
1763 For internal use only.
1764 Passing a pointer to null is permitted (do this in the destructor)
1765 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1766 */
1767 void set_m_data(DataAbstract_ptr p);
1768
1769 friend class DataAbstract; // To allow calls to updateShareStatus
1770
1771 };
1772
1773 } // end namespace escript
1774
1775
1776 // No, this is not supposed to be at the top of the file
1777 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1778 // so that I can dynamic cast between them below.
1779 #include "DataReady.h"
1780 #include "DataLazy.h"
1781
1782 namespace escript
1783 {
1784
1785 inline
1786 const DataReady*
1787 Data::getReady() const
1788 {
1789 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1790 EsysAssert((dr!=0), "Error - casting to DataReady.");
1791 return dr;
1792 }
1793
1794 inline
1795 DataReady*
1796 Data::getReady()
1797 {
1798 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1799 EsysAssert((dr!=0), "Error - casting to DataReady.");
1800 return dr;
1801 }
1802
1803 // Be wary of using this for local operations since it (temporarily) increases reference count.
1804 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1805 // getReady() instead
1806 inline
1807 DataReady_ptr
1808 Data::getReadyPtr()
1809 {
1810 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1811 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1812 return dr;
1813 }
1814
1815
1816 inline
1817 const_DataReady_ptr
1818 Data::getReadyPtr() const
1819 {
1820 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1821 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1822 return dr;
1823 }
1824
1825 inline
1826 DataAbstract::ValueType::value_type*
1827 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1828 {
1829 if (isLazy())
1830 {
1831 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1832 }
1833 return getReady()->getSampleDataRW(sampleNo);
1834 }
1835
1836 inline
1837 const DataAbstract::ValueType::value_type*
1838 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1839 {
1840 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1841 if (l!=0)
1842 {
1843 size_t offset=0;
1844 if (bufferg==NULL)
1845 {
1846 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1847 }
1848 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1849 return &((*res)[offset]);
1850 }
1851 return getReady()->getSampleDataRO(sampleNo);
1852 }
1853
1854
1855
1856 /**
1857 Modify a filename for MPI parallel output to multiple files
1858 */
1859 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1860
1861 /**
1862 Binary Data object operators.
1863 */
1864 inline double rpow(double x,double y)
1865 {
1866 return pow(y,x);
1867 }
1868
1869 /**
1870 \brief
1871 Operator+
1872 Takes two Data objects.
1873 */
1874 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1875
1876 /**
1877 \brief
1878 Operator-
1879 Takes two Data objects.
1880 */
1881 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1882
1883 /**
1884 \brief
1885 Operator*
1886 Takes two Data objects.
1887 */
1888 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1889
1890 /**
1891 \brief
1892 Operator/
1893 Takes two Data objects.
1894 */
1895 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1896
1897 /**
1898 \brief
1899 Operator+
1900 Takes LHS Data object and RHS python::object.
1901 python::object must be convertible to Data type.
1902 */
1903 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1904
1905 /**
1906 \brief
1907 Operator-
1908 Takes LHS Data object and RHS python::object.
1909 python::object must be convertible to Data type.
1910 */
1911 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1912
1913 /**
1914 \brief
1915 Operator*
1916 Takes LHS Data object and RHS python::object.
1917 python::object must be convertible to Data type.
1918 */
1919 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1920
1921 /**
1922 \brief
1923 Operator/
1924 Takes LHS Data object and RHS python::object.
1925 python::object must be convertible to Data type.
1926 */
1927 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1928
1929 /**
1930 \brief
1931 Operator+
1932 Takes LHS python::object and RHS Data object.
1933 python::object must be convertible to Data type.
1934 */
1935 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1936
1937 /**
1938 \brief
1939 Operator-
1940 Takes LHS python::object and RHS Data object.
1941 python::object must be convertible to Data type.
1942 */
1943 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1944
1945 /**
1946 \brief
1947 Operator*
1948 Takes LHS python::object and RHS Data object.
1949 python::object must be convertible to Data type.
1950 */
1951 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1952
1953 /**
1954 \brief
1955 Operator/
1956 Takes LHS python::object and RHS Data object.
1957 python::object must be convertible to Data type.
1958 */
1959 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1960
1961
1962
1963 /**
1964 \brief
1965 Output operator
1966 */
1967 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1968
1969 /**
1970 \brief
1971 Compute a tensor product of two Data objects
1972 \param arg_0 - Input - Data object
1973 \param arg_1 - Input - Data object
1974 \param axis_offset - Input - axis offset
1975 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1976 */
1977 ESCRIPT_DLL_API
1978 Data
1979 C_GeneralTensorProduct(Data& arg_0,
1980 Data& arg_1,
1981 int axis_offset=0,
1982 int transpose=0);
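/* A hedged sketch ('A' and 'B' are hypothetical, non-lazy Data objects):

     escript::Data AB = escript::C_GeneralTensorProduct(A, B, 1, 0);
     // contracts one axis of A against one axis of B; transpose=0 transposes neither
*/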
1983
1984 /**
1985 \brief
1986 Perform the given binary operation with this and right as operands.
1987 Right is a Data object.
1988 */
1989 template <class BinaryFunction>
1990 inline
1991 void
1992 Data::binaryOp(const Data& right,
1993 BinaryFunction operation)
1994 {
1995 //
1996 // updating a rank-zero object with a higher-rank RHS is not supported
1997 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1998 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1999 }
2000
2001 if (isLazy() || right.isLazy())
2002 {
2003 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2004 }
2005 //
2006 // initially make the temporary a shallow copy
2007 Data tempRight(right);
2008
2009 if (getFunctionSpace()!=right.getFunctionSpace()) {
2010 if (right.probeInterpolation(getFunctionSpace())) {
2011 //
2012 // an interpolation is required so create a new Data
2013 tempRight=Data(right,this->getFunctionSpace());
2014 } else if (probeInterpolation(right.getFunctionSpace())) {
2015 //
2016 // interpolate onto the RHS function space
2017 Data tempLeft(*this,right.getFunctionSpace());
2018 // m_data=tempLeft.m_data;
2019 set_m_data(tempLeft.m_data);
2020 }
2021 }
2022 operandCheck(tempRight);
2023 //
2024 // ensure this has the right type for the RHS
2025 typeMatchRight(tempRight);
2026 //
2027 // Need to cast to the concrete types so that the correct binaryOp
2028 // is called.
2029 if (isExpanded()) {
2030 //
2031 // Expanded data will be done in parallel, the right hand side can be
2032 // of any data type
2033 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2034 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2035 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2036 } else if (isTagged()) {
2037 //
2038 // Tagged data is operated on serially, the right hand side can be
2039 // either DataConstant or DataTagged
2040 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2041 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2042 if (right.isTagged()) {
2043 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2044 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2045 escript::binaryOp(*leftC,*rightC,operation);
2046 } else {
2047 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2048 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2049 escript::binaryOp(*leftC,*rightC,operation);
2050 }
2051 } else if (isConstant()) {
2052 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2053 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2054 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2055 escript::binaryOp(*leftC,*rightC,operation);
2056 }
2057 }
2058
2059 /**
2060 \brief
2061 Perform the given Data object reduction algorithm on this and return the result.
2062 Given operation combines each element of each data point, thus argument
2063 object (*this) is a rank n Data object, and returned object is a scalar.
2064 Calls escript::algorithm.
2065 */
2066 template <class BinaryFunction>
2067 inline
2068 double
2069 Data::algorithm(BinaryFunction operation, double initial_value) const
2070 {
2071 if (isExpanded()) {
2072 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2073 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2074 return escript::algorithm(*leftC,operation,initial_value);
2075 } else if (isTagged()) {
2076 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2077 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2078 return escript::algorithm(*leftC,operation,initial_value);
2079 } else if (isConstant()) {
2080 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2081 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2082 return escript::algorithm(*leftC,operation,initial_value);
2083 } else if (isEmpty()) {
2084 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2085 } else if (isLazy()) {
2086 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2087 } else {
2088 throw DataException("Error - Data encapsulates an unknown type.");
2089 }
2090 }
2091
2092 /**
2093 \brief
2094 Perform the given data point reduction algorithm on data and return the result.
2095 Given operation combines each element within each data point into a scalar,
2096 thus argument object is a rank n Data object, and returned object is a
2097 rank 0 Data object.
2098 Calls escript::dp_algorithm.
2099 */
2100 template <class BinaryFunction>
2101 inline
2102 Data
2103 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2104 {
2105 if (isEmpty()) {
2106 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2107 }
2108 else if (isExpanded()) {
2109 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2110 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2111 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2112 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2113 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2114 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2115 return result;
2116 }
2117 else if (isTagged()) {
2118 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2119 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2120 DataTypes::ValueType defval(1);
2121 defval[0]=0;
2122 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2123 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2124 return Data(resultT); // note: the Data object now owns the resultT pointer
2125 }
2126 else if (isConstant()) {
2127 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2128 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2129 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2130 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2131 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2132 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2133 return result;
2134 } else if (isLazy()) {
2135 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2136 } else {
2137 throw DataException("Error - Data encapsulates an unknown type.");
2138 }
2139 }
2140
2141 /**
2142 \brief
2143 Compute a tensor operation with two Data objects
2144 \param arg_0 - Input - Data object
2145 \param arg_1 - Input - Data object
2146 \param operation - Input - Binary op functor
2147 */
2148 template <typename BinaryFunction>
2149 inline
2150 Data
2151 C_TensorBinaryOperation(Data const &arg_0,
2152 Data const &arg_1,
2153 BinaryFunction operation)
2154 {
2155 if (arg_0.isEmpty() || arg_1.isEmpty())
2156 {
2157 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2158 }
2159 if (arg_0.isLazy() || arg_1.isLazy())
2160 {
2161 throw DataException("Error - Operations not permitted on lazy data.");
2162 }
2163 // Interpolate if necessary and find an appropriate function space
2164 Data arg_0_Z, arg_1_Z;
2165 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2166 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2167 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2168 arg_1_Z = Data(arg_1);
2169 }
2170 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2171 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2172 arg_0_Z =Data(arg_0);
2173 }
2174 else {
2175 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2176 }
2177 } else {
2178 arg_0_Z = Data(arg_0);
2179 arg_1_Z = Data(arg_1);
2180 }
2181 // Get rank and shape of inputs
2182 int rank0 = arg_0_Z.getDataPointRank();
2183 int rank1 = arg_1_Z.getDataPointRank();
2184 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2185 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2186 int size0 = arg_0_Z.getDataPointSize();
2187 int size1 = arg_1_Z.getDataPointSize();
2188 // Declare output Data object
2189 Data res;
2190
2191 if (shape0 == shape1) {
2192 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2193 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2194 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2195 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2196 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2197
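       // tensor_binary_operation applies `operation` element-wise to the size0
       // values of one data point, reading from ptr_0/ptr_1 and writing to ptr_2.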
2198 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2199 }
2200 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2201
2202 // Prepare the DataConstant input
2203 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2204
2205 // Borrow DataTagged input from Data object
2206 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2207
2208       // Prepare a DataTagged output (tmp_2)
2209 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2210 res.tag();
2211 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2212
2213 // Prepare offset into DataConstant
2214 int offset_0 = tmp_0->getPointOffset(0,0);
2215 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2216
2217 // Get the pointers to the actual data
2218 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2219 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2220
2221 // Compute a result for the default
2222 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2223 // Compute a result for each tag
2224 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2225 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2226 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2227 tmp_2->addTag(i->first);
2228 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2229 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2230
2231 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2232 }
2233
2234 }
2235 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2236 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2237 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2238 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2239 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2240
2241 int sampleNo_1,dataPointNo_1;
2242 int numSamples_1 = arg_1_Z.getNumSamples();
2243 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2244 int offset_0 = tmp_0->getPointOffset(0,0);
2245 res.requireWrite();
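       // requireWrite() makes sure the result is ready to be written to before the
       // OpenMP-parallel loop below touches it from multiple threads.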
2246 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2247 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2248 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2249 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2250 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2251 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2252 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2253 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2254 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2255 }
2256 }
2257
2258 }
2259 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2260 // Borrow DataTagged input from Data object
2261 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2262
2263 // Prepare the DataConstant input
2264 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2265
2266       // Prepare a DataTagged output (tmp_2)
2267 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2268 res.tag();
2269 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2270
2271 // Prepare offset into DataConstant
2272 int offset_1 = tmp_1->getPointOffset(0,0);
2273
2274 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2275 // Get the pointers to the actual data
2276 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2277 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2278 // Compute a result for the default
2279 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2280 // Compute a result for each tag
2281 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2282 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2283 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2284 tmp_2->addTag(i->first);
2285 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2286 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2287 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2288 }
2289
2290 }
2291 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2292 // Borrow DataTagged input from Data object
2293 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2294
2295 // Borrow DataTagged input from Data object
2296 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2297
2298       // Prepare a DataTagged output (tmp_2)
2299 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2300 res.tag(); // DataTagged output
2301 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2302
2303 // Get the pointers to the actual data
2304 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2305 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2306 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2307
2308 // Compute a result for the default
2309 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2310 // Merge the tags
2311 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2312 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2313 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2314 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2315         tmp_2->addTag(i->first); // register the tag on the output so storage with the output's shape is allocated for it
2316 }
2317 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2318 tmp_2->addTag(i->first);
2319 }
2320 // Compute a result for each tag
2321 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2322 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2323
2324 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2325 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2326 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2327
2328 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2329 }
2330
2331 }
2332 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2333       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2334 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2335 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2336 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2337 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2338
2339 int sampleNo_0,dataPointNo_0;
2340 int numSamples_0 = arg_0_Z.getNumSamples();
2341 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2342 res.requireWrite();
2343 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2344 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2345         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all data points in a tagged sample share the same value, so use point 0
2346 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2347 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2348 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2349 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2350 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2351 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2352 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2353 }
2354 }
2355
2356 }
2357 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2358 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2359 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2360 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2361 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2362
2363 int sampleNo_0,dataPointNo_0;
2364 int numSamples_0 = arg_0_Z.getNumSamples();
2365 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2366 int offset_1 = tmp_1->getPointOffset(0,0);
2367 res.requireWrite();
2368 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2369 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2370 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2371 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2372 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2373
2374 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2375 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2376 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2377
2378
2379 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2380 }
2381 }
2382
2383 }
2384 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2385       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2386 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2387 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2388 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2389 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2390
2391 int sampleNo_0,dataPointNo_0;
2392 int numSamples_0 = arg_0_Z.getNumSamples();
2393 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2394 res.requireWrite();
2395 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2396 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2397 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2398 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2399 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2400 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2401 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2402 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2403 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2404 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2405 }
2406 }
2407
2408 }
2409 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2410       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2411 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2412 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2413 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2414 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2415
2416 int sampleNo_0,dataPointNo_0;
2417 int numSamples_0 = arg_0_Z.getNumSamples();
2418 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2419 res.requireWrite();
2420 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2421 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2422         dataPointNo_0=0; // data points within a sample are stored contiguously, so the commented-out per-point loop below is collapsed into one call over the whole sample
2423 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2424 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2425 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2426 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2427 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2428 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2429 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2430 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2431 // }
2432 }
2433
2434 }
2435 else {
2436 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2437 }
2438
2439 } else if (0 == rank0) {
2440 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2441 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2442 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2443 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2444 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2445 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2446 }
2447 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2448
2449 // Prepare the DataConstant input
2450 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2451
2452 // Borrow DataTagged input from Data object
2453 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2454
2455       // Prepare a DataTagged output (tmp_2)
2456 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2457 res.tag();
2458 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2459
2460 // Prepare offset into DataConstant
2461 int offset_0 = tmp_0->getPointOffset(0,0);
2462 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2463
2464 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2465 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2466
2467 // Compute a result for the default
2468 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2469 // Compute a result for each tag
2470 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2471 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2472 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2473 tmp_2->addTag(i->first);
2474 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2475 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2476 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2477 }
2478
2479 }
2480 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2481
2482 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2483 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2484 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2485 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2486
2487 int sampleNo_1,dataPointNo_1;
2488 int numSamples_1 = arg_1_Z.getNumSamples();
2489 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2490 int offset_0 = tmp_0->getPointOffset(0,0);
2491 res.requireWrite();
2492 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2493 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2494 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2495 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2496 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2497 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2498 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2499 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2500 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2501
2502 }
2503 }
2504
2505 }
2506 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2507
2508 // Borrow DataTagged input from Data object
2509 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2510
2511 // Prepare the DataConstant input
2512 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2513
2514       // Prepare a DataTagged output (tmp_2)
2515 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2516 res.tag();
2517 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2518
2519 // Prepare offset into DataConstant
2520 int offset_1 = tmp_1->getPointOffset(0,0);
2521 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2522
2523 // Get the pointers to the actual data
2524 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2525 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2526
2527
2528 // Compute a result for the default
2529 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2530 // Compute a result for each tag
2531 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2532 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2533 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2534 tmp_2->addTag(i->first);
2535 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2536 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2537
2538 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2539 }
2540
2541 }
2542 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2543
2544 // Borrow DataTagged input from Data object
2545 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2546
2547 // Borrow DataTagged input from Data object
2548 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2549
2550       // Prepare a DataTagged output (tmp_2)
2551 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2552 res.tag(); // DataTagged output
2553 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2554
2555 // Get the pointers to the actual data
2556 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2557 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2558 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2559
2560 // Compute a result for the default
2561 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2562 // Merge the tags
2563 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2564 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2565 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2566 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2567         tmp_2->addTag(i->first); // register the tag on the output so storage with the output's shape is allocated for it
2568 }
2569 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2570 tmp_2->addTag(i->first);
2571 }
2572 // Compute a result for each tag
2573 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2574 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2575 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2576 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2577 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2578
2579 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2580 }
2581
2582 }
2583 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2584
2585       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2586 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2587 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2588 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2589 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2590
2591 int sampleNo_0,dataPointNo_0;
2592 int numSamples_0 = arg_0_Z.getNumSamples();
2593 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2594 res.requireWrite();
2595 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2596 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2597         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all data points in a tagged sample share the same value, so use point 0
2598 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2599 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2600 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2601 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2602 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2603 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2604 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2605 }
2606 }
2607
2608 }
2609 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2610 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2611 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2612 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2613 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2614
2615 int sampleNo_0,dataPointNo_0;
2616 int numSamples_0 = arg_0_Z.getNumSamples();
2617 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2618 int offset_1 = tmp_1->getPointOffset(0,0);
2619 res.requireWrite();
2620 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2621 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2622 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2623 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2624 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2625 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2626 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2627 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2628 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2629 }
2630 }
2631
2632
2633 }
2634 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2635
2636       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2637 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2638 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2639 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2640 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2641
2642 int sampleNo_0,dataPointNo_0;
2643 int numSamples_0 = arg_0_Z.getNumSamples();
2644 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2645 res.requireWrite();
2646 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2647 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2648 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2649 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2650 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2651 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2652 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2653 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2654 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2655 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2656 }
2657 }
2658
2659 }
2660 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2661
2662       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2663 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2664 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2665 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2666 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2667
2668 int sampleNo_0,dataPointNo_0;
2669 int numSamples_0 = arg_0_Z.getNumSamples();
2670 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2671 res.requireWrite();
2672 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2673 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2674 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2675 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2676 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2677 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2678 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2679 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2680 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2681 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2682 }
2683 }
2684
2685 }
2686 else {
2687 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2688 }
2689
2690 } else if (0 == rank1) {
2691 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2692 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2693 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2694 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2695 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2696 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2697 }
2698 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2699
2700 // Prepare the DataConstant input
2701 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2702
2703 // Borrow DataTagged input from Data object
2704 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2705
2706       // Prepare a DataTagged output (tmp_2)
2707 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2708 res.tag();
2709 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2710
2711 // Prepare offset into DataConstant
2712 int offset_0 = tmp_0->getPointOffset(0,0);
2713 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2714
2715       // Get the pointers to the actual data
2716 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2717 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2718
2719 // Compute a result for the default
2720 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2721 // Compute a result for each tag
2722 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2723 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2724 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2725 tmp_2->addTag(i->first);
2726 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2727 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2728 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2729 }
2730 }
2731 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2732
2733 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2734 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2735 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2736 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2737
2738 int sampleNo_1,dataPointNo_1;
2739 int numSamples_1 = arg_1_Z.getNumSamples();
2740 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2741 int offset_0 = tmp_0->getPointOffset(0,0);
2742 res.requireWrite();
2743 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2744 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2745 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2746 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2747 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2748 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2749 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2750 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2751 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2752 }
2753 }
2754
2755 }
2756 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2757
2758 // Borrow DataTagged input from Data object
2759 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2760
2761 // Prepare the DataConstant input
2762 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2763
2764       // Prepare a DataTagged output (tmp_2)
2765 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2766 res.tag();
2767 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2768
2769 // Prepare offset into DataConstant
2770 int offset_1 = tmp_1->getPointOffset(0,0);
2771 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2772 // Get the pointers to the actual data
2773 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2774 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2775 // Compute a result for the default
2776 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2777 // Compute a result for each tag
2778 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2779 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2780 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2781 tmp_2->addTag(i->first);
2782 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2783 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2784 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2785 }
2786
2787 }
2788 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2789
2790 // Borrow DataTagged input from Data object
2791 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2792
2793 // Borrow DataTagged input from Data object
2794 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2795
2796       // Prepare a DataTagged output (tmp_2)
2797 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2798 res.tag(); // DataTagged output
2799 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2800
2801 // Get the pointers to the actual data
2802 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2803 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2804 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2805
2806 // Compute a result for the default
2807 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2808 // Merge the tags
2809 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2810 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2811 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2812 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2813         tmp_2->addTag(i->first); // register the tag on the output so storage with the output's shape is allocated for it
2814 }
2815 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2816 tmp_2->addTag(i->first);
2817 }
2818 // Compute a result for each tag
2819 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2820 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2821 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2822 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2823 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2824 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2825 }
2826
2827 }
2828 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2829
2830       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2831 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2832 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2833 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2834 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2835
2836 int sampleNo_0,dataPointNo_0;
2837 int numSamples_0 = arg_0_Z.getNumSamples();
2838 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2839 res.requireWrite();
2840 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2841 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2842         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all data points in a tagged sample share the same value, so use point 0
2843 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2844 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2845 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2846 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2847 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2848 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2849 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2850 }
2851 }
2852
2853 }
2854 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2855 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2856 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2857 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2858 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2859
2860 int sampleNo_0,dataPointNo_0;
2861 int numSamples_0 = arg_0_Z.getNumSamples();
2862 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2863 int offset_1 = tmp_1->getPointOffset(0,0);
2864 res.requireWrite();
2865 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2866 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2867 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2868 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2869 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2870 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2871 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2872 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2873 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2874 }
2875 }
2876
2877
2878 }
2879 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2880
2881       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2882 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2883 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2884 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2885 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2886
2887 int sampleNo_0,dataPointNo_0;
2888 int numSamples_0 = arg_0_Z.getNumSamples();
2889 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2890 res.requireWrite();
2891 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2892 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2893 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2894 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2895 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2896 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2897 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2898 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2899 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2900 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2901 }
2902 }
2903
2904 }
2905 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2906
2907       // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2908 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2909 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2910 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2911 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2912
2913 int sampleNo_0,dataPointNo_0;
2914 int numSamples_0 = arg_0_Z.getNumSamples();
2915 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2916 res.requireWrite();
2917 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2918 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2919 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2920 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2921 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2922 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2923 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2924 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2925 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2926 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2927 }
2928 }
2929
2930 }
2931 else {
2932 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2933 }
2934
2935 } else {
2936 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2937 }
2938
2939 return res;
2940 }
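
// Usage sketch (illustrative only; the names below are not part of the escript API).
// Any copyable functor or function taking two doubles and returning a double can
// serve as the BinaryFunction argument, e.g. a point-wise sum:
struct Example_PointwiseSum
{
   double operator()(double x, double y) const { return x + y; }
};

inline
Data
example_pointwise_sum(const Data& a, const Data& b)
{
   // interpolation to a common FunctionSpace and dispatch on the storage type
   // (constant/tagged/expanded) are handled inside C_TensorBinaryOperation
   return C_TensorBinaryOperation(a, b, Example_PointwiseSum());
}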
2941
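/**
   \brief
   Compute a unary tensor operation applied point-wise to a Data object
   \param arg_0 - Input - Data object
   \param operation - Input - unary operation functor applied to each data point value
*/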
2942 template <typename UnaryFunction>
2943 Data
2944 C_TensorUnaryOperation(Data const &arg_0,
2945 UnaryFunction operation)
2946 {
2947 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2948 {
2949 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2950 }
2951 if (arg_0.isLazy())
2952 {
2953 throw DataException("Error - Operations not permitted on lazy data.");
2954 }
2955   // Take a copy of the argument; with a single input no interpolation is required
2956 Data arg_0_Z = Data(arg_0);
2957
2958 // Get rank and shape of inputs
2959 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2960 int size0 = arg_0_Z.getDataPointSize();
2961
2962 // Declare output Data object
2963 Data res;
2964
2965 if (arg_0_Z.isConstant()) {
2966 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2967 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2968 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2969 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2970 }
2971 else if (arg_0_Z.isTagged()) {
2972
2973 // Borrow DataTagged input from Data object
2974 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2975
2976     // Prepare a DataTagged output (tmp_2)
2977 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2978 res.tag();
2979 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2980
2981 // Get the pointers to the actual data
2982 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2983 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2984 // Compute a result for the default
2985 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2986 // Compute a result for each tag
2987 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2988 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2989 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2990 tmp_2->addTag(i->first);
2991 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2992 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2993 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2994 }
2995
2996 }
2997 else if (arg_0_Z.isExpanded()) {
2998
2999 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3000 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3001 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3002
3003 int sampleNo_0,dataPointNo_0;
3004 int numSamples_0 = arg_0_Z.getNumSamples();
3005 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3006 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3007 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3008       dataPointNo_0=0; // data points within a sample are stored contiguously, so the commented-out per-point loop below is collapsed into one call over the whole sample
3009 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3010 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3011 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3012 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3013 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3014 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3015 // }
3016 }
3017 }
3018 else {
3019     throw DataException("Error - C_TensorUnaryOperation: unknown input data type.");
3020 }
3021
3022 return res;
3023 }
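
// Usage sketch (illustrative only; the helper names are not part of the escript API).
// Any callable taking and returning a double works as the UnaryFunction argument:
inline double example_negate(double x) { return -x; }

inline
Data
example_pointwise_negation(const Data& a)
{
   return C_TensorUnaryOperation(a, example_negate);
}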
3024
3025 }
3026 #endif
