Contents of /trunk/escript/src/Data.h


Revision 2459
Thu Jun 4 06:17:54 2009 UTC by jfenwick
File MIME type: text/plain
File size: 91577 byte(s)
Removed some commented out code.
Added toListOfTuples to the DataObject.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData onto "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
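
/*
   Illustrative usage sketch (editorial addition, not part of the original API docs):
   constructing Data objects from C++ with the constructors declared above.
   "fs" is an assumed, already-constructed FunctionSpace, and ShapeType is assumed
   to behave like a std::vector of dimensions.

   \code
   FunctionSpace fs;                          // normally obtained from a domain
   Data s(2.5, DataTypes::ShapeType(), fs);   // rank-0 (scalar) constant data, value 2.5
   DataTypes::ShapeType shape;
   shape.push_back(3);                        // data points of shape (3,)
   Data v(0.0, shape, fs, true);              // expanded storage: one value per data point
   \endcode
*/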
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
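
/*
   Illustrative sketch (editorial addition): a typical lazy/write workflow using
   only methods declared in this class. "d" is an assumed existing Data object.

   \code
   d.delaySelf();                     // switch to delayed (lazy) evaluation
   // ... build up further operations on d ...
   d.requireWrite();                  // resolve and un-share (single threaded section!)
   double* p = d.getSampleDataRW(0);  // now safe to modify sample 0 in place
   \endcode
*/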
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine if a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=false);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for LazyData.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - Input - a buffer to compute (and store) sample data in will be selected from this group.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
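
/*
   Illustrative sketch (editorial addition): writing every value of an expanded
   Data object "d" through the sample interface above. The number of doubles per
   sample is assumed to be getNumDataPointsPerSample()*getDataPointSize().

   \code
   d.expand();
   d.requireWrite();                                   // required before RW access
   int perSample = d.getNumDataPointsPerSample() * d.getDataPointSize();
   for (int s = 0; s < d.getNumSamples(); ++s) {
       double* p = d.getSampleDataRW(s);               // start of sample s
       for (int i = 0; i < perSample; ++i) {
           p[i] = 0.0;
       }
   }
   \endcode
*/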
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687
688
689 /**
690 \brief
691 Assign the given value to the tag associated with name. Implicitly converts this
692 object to type DataTagged. Throws an exception if this object
693 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
694 \param tagKey - Input - Integer key.
695 \param value - Input - Value to associate with given key.
696 ==>*
697 */
698 ESCRIPT_DLL_API
699 void
700 setTaggedValueByName(std::string name,
701 const boost::python::object& value);
702
703 /**
704 \brief
705 Assign the given value to the tag. Implicitly converts this
706 object to type DataTagged if it is constant.
707
708 \param tagKey - Input - Integer key.
709 \param value - Input - Value to associate with given key.
710 ==>*
711 */
712 ESCRIPT_DLL_API
713 void
714 setTaggedValue(int tagKey,
715 const boost::python::object& value);
716
717 /**
718 \brief
719 Assign the given value to the tag. Implicitly converts this
720 object to type DataTagged if it is constant.
721
722 \param tagKey - Input - Integer key.
723 \param pointshape - Input - The shape of the value parameter
724 \param value - Input - Value to associate with given key.
725 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
726 */
727 ESCRIPT_DLL_API
728 void
729 setTaggedValueFromCPP(int tagKey,
730 const DataTypes::ShapeType& pointshape,
731 const DataTypes::ValueType& value,
732 int dataOffset=0);
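
/*
   Illustrative sketch (editorial addition): assigning a per-tag value from C++.
   "fs" is an assumed FunctionSpace; ValueType is assumed to be constructible with
   a size and indexable like a vector of doubles.

   \code
   Data d(1.0, DataTypes::ShapeType(), fs);  // scalar constant data
   d.tag();                                  // convert constant -> tagged
   DataTypes::ValueType v(1);
   v[0] = 42.0;
   d.setTaggedValueFromCPP(7, DataTypes::ShapeType(), v);  // tag key 7 now maps to 42.0
   \endcode
*/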
733
734
735
736 /**
737 \brief
738 Copy other Data object into this Data object where mask is positive.
739 */
740 ESCRIPT_DLL_API
741 void
742 copyWithMask(const Data& other,
743 const Data& mask);
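
/*
   Illustrative sketch (editorial addition): conditional assignment via a mask.
   "x", "result" and "other" are assumed existing Data objects.

   \code
   Data mask = x.wherePositive();      // 1 where x > 0, 0 elsewhere
   result.copyWithMask(other, mask);   // copy values of "other" where the mask is positive
   \endcode
*/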
744
745 /**
746 Data object operation methods and operators.
747 */
748
749 /**
750 \brief
751 set all values to zero
752 *
753 */
754 ESCRIPT_DLL_API
755 void
756 setToZero();
757
758 /**
759 \brief
760 Interpolates this onto the given functionspace and returns
761 the result as a Data object.
762 *
763 */
764 ESCRIPT_DLL_API
765 Data
766 interpolate(const FunctionSpace& functionspace) const;
767 /**
768 \brief
769 Calculates the gradient of the data at the data points of functionspace.
770 If functionspace is not present the function space of Function(getDomain()) is used.
771 *
772 */
773 ESCRIPT_DLL_API
774 Data
775 gradOn(const FunctionSpace& functionspace) const;
776
777 ESCRIPT_DLL_API
778 Data
779 grad() const;
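
/*
   Illustrative sketch (editorial addition): interpolation and gradients.
   "u" is an assumed Data object and "fs" an assumed target FunctionSpace.

   \code
   Data ui = u.interpolate(fs);   // u interpolated onto fs (throws if not possible)
   Data g  = u.grad();            // gradient on Function(getDomain())
   Data gf = u.gradOn(fs);        // gradient evaluated at the data points of fs
   \endcode
*/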
780
781 /**
782 \brief
783 Calculate the integral over the function space domain as a python tuple.
784 */
785 ESCRIPT_DLL_API
786 boost::python::object
787 integrateToTuple_const() const;
788
789
790 /**
791 \brief
792 Calculate the integral over the function space domain as a python tuple.
793 */
794 ESCRIPT_DLL_API
795 boost::python::object
796 integrateToTuple();
797
798
799
800 /**
801 \brief
802 Return the reciprocal (1./value) of each data point of this Data object.
803 *
804 */
805 ESCRIPT_DLL_API
806 Data
807 oneOver() const;
808 /**
809 \brief
810 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
811 *
812 */
813 ESCRIPT_DLL_API
814 Data
815 wherePositive() const;
816
817 /**
818 \brief
819 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
820 *
821 */
822 ESCRIPT_DLL_API
823 Data
824 whereNegative() const;
825
826 /**
827 \brief
828 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
829 *
830 */
831 ESCRIPT_DLL_API
832 Data
833 whereNonNegative() const;
834
835 /**
836 \brief
837 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
838 *
839 */
840 ESCRIPT_DLL_API
841 Data
842 whereNonPositive() const;
843
844 /**
845 \brief
846 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
847 *
848 */
849 ESCRIPT_DLL_API
850 Data
851 whereZero(double tol=0.0) const;
852
853 /**
854 \brief
855 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
856 *
857 */
858 ESCRIPT_DLL_API
859 Data
860 whereNonZero(double tol=0.0) const;
861
862 /**
863 \brief
864 Return the maximum absolute value of this Data object.
865
866 The method is not const because lazy data needs to be expanded before Lsup can be computed.
867 The _const form can be used when the Data object is const, however this will only work for
868 Data which is not Lazy.
869
870 For Data which contain no samples (or tagged Data for which no tags in use have a value)
871 zero is returned.
872 */
873 ESCRIPT_DLL_API
874 double
875 Lsup();
876
877 ESCRIPT_DLL_API
878 double
879 Lsup_const() const;
880
881
882 /**
883 \brief
884 Return the maximum value of this Data object.
885
886 The method is not const because lazy data needs to be expanded before sup can be computed.
887 The _const form can be used when the Data object is const, however this will only work for
888 Data which is not Lazy.
889
890 For Data which contain no samples (or tagged Data for which no tags in use have a value)
891 a large negative value is returned.
892 */
893 ESCRIPT_DLL_API
894 double
895 sup();
896
897 ESCRIPT_DLL_API
898 double
899 sup_const() const;
900
901
902 /**
903 \brief
904 Return the minimum value of this Data object.
905
906 The method is not const because lazy data needs to be expanded before inf can be computed.
907 The _const form can be used when the Data object is const, however this will only work for
908 Data which is not Lazy.
909
910 For Data which contain no samples (or tagged Data for which no tags in use have a value)
911 a large positive value is returned.
912 */
913 ESCRIPT_DLL_API
914 double
915 inf();
916
917 ESCRIPT_DLL_API
918 double
919 inf_const() const;
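
/*
   Illustrative sketch (editorial addition): reductions over a Data object.
   The non-const forms may resolve lazy data first, as noted above.

   \code
   double maxabs = d.Lsup();   // largest absolute value over all data points
   double hi     = d.sup();    // largest value
   double lo     = d.inf();    // smallest value
   \endcode
*/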
920
921
922
923 /**
924 \brief
925 Return the absolute value of each data point of this Data object.
926 *
927 */
928 ESCRIPT_DLL_API
929 Data
930 abs() const;
931
932 /**
933 \brief
934 Return the maximum value of each data point of this Data object.
935 *
936 */
937 ESCRIPT_DLL_API
938 Data
939 maxval() const;
940
941 /**
942 \brief
943 Return the minimum value of each data point of this Data object.
944 *
945 */
946 ESCRIPT_DLL_API
947 Data
948 minval() const;
949
950 /**
951 \brief
952 Return the (sample number, data-point number) of the data point with
953 the minimum value in this Data object.
954 */
955 ESCRIPT_DLL_API
956 const boost::python::tuple
957 minGlobalDataPoint() const;
958
959 ESCRIPT_DLL_API
960 void
961 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
962 /**
963 \brief
964 Return the sign of each data point of this Data object.
965 -1 for negative values, zero for zero values, 1 for positive values.
966 *
967 */
968 ESCRIPT_DLL_API
969 Data
970 sign() const;
971
972 /**
973 \brief
974 Return the symmetric part of a matrix which is half the matrix plus its transpose.
975 *
976 */
977 ESCRIPT_DLL_API
978 Data
979 symmetric() const;
980
981 /**
982 \brief
983 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
984 *
985 */
986 ESCRIPT_DLL_API
987 Data
988 nonsymmetric() const;
989
990 /**
991 \brief
992 Return the trace of a matrix
993 *
994 */
995 ESCRIPT_DLL_API
996 Data
997 trace(int axis_offset) const;
998
999 /**
1000 \brief
1001 Transpose each data point of this Data object around the given axis.
1002 *
1003 */
1004 ESCRIPT_DLL_API
1005 Data
1006 transpose(int axis_offset) const;
1007
1008 /**
1009 \brief
1010 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1011 Currently this function is restricted to rank 2, square shape, and dimension 3.
1012 *
1013 */
1014 ESCRIPT_DLL_API
1015 Data
1016 eigenvalues() const;
1017
1018 /**
1019 \brief
1020 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1021 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1022 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1023 first non-zero entry is positive.
1024 Currently this function is restricted to rank 2, square shape, and dimension 3
1025 *
1026 */
1027 ESCRIPT_DLL_API
1028 const boost::python::tuple
1029 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
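
/*
   Illustrative sketch (editorial addition): eigen decomposition of a rank-2, 3x3
   symmetric Data object "stress". The tuple interface needs an initialised Python
   interpreter since it returns boost::python objects.

   \code
   Data ev = stress.eigenvalues();   // eigenvalues of the symmetric part, in increasing order
   boost::python::tuple vv = stress.eigenvalues_and_eigenvectors(1.e-12);
   // vv[0] holds the eigenvalues, vv[1] the corresponding eigenvectors
   \endcode
*/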
1030
1031 /**
1032 \brief
1033 swaps the components axis0 and axis1
1034 *
1035 */
1036 ESCRIPT_DLL_API
1037 Data
1038 swapaxes(const int axis0, const int axis1) const;
1039
1040 /**
1041 \brief
1042 Return the error function erf of each data point of this Data object.
1043 *
1044 */
1045 ESCRIPT_DLL_API
1046 Data
1047 erf() const;
1048
1049 /**
1050 \brief
1051 Return the sin of each data point of this Data object.
1052 *
1053 */
1054 ESCRIPT_DLL_API
1055 Data
1056 sin() const;
1057
1058 /**
1059 \brief
1060 Return the cos of each data point of this Data object.
1061 *
1062 */
1063 ESCRIPT_DLL_API
1064 Data
1065 cos() const;
1066
1067 /**
1068 \brief
1069 Return the tan of each data point of this Data object.
1070 *
1071 */
1072 ESCRIPT_DLL_API
1073 Data
1074 tan() const;
1075
1076 /**
1077 \brief
1078 Return the asin of each data point of this Data object.
1079 *
1080 */
1081 ESCRIPT_DLL_API
1082 Data
1083 asin() const;
1084
1085 /**
1086 \brief
1087 Return the acos of each data point of this Data object.
1088 *
1089 */
1090 ESCRIPT_DLL_API
1091 Data
1092 acos() const;
1093
1094 /**
1095 \brief
1096 Return the atan of each data point of this Data object.
1097 *
1098 */
1099 ESCRIPT_DLL_API
1100 Data
1101 atan() const;
1102
1103 /**
1104 \brief
1105 Return the sinh of each data point of this Data object.
1106 *
1107 */
1108 ESCRIPT_DLL_API
1109 Data
1110 sinh() const;
1111
1112 /**
1113 \brief
1114 Return the cosh of each data point of this Data object.
1115 *
1116 */
1117 ESCRIPT_DLL_API
1118 Data
1119 cosh() const;
1120
1121 /**
1122 \brief
1123 Return the tanh of each data point of this Data object.
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 Data
1128 tanh() const;
1129
1130 /**
1131 \brief
1132 Return the asinh of each data point of this Data object.
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 asinh() const;
1138
1139 /**
1140 \brief
1141 Return the acosh of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 acosh() const;
1147
1148 /**
1149 \brief
1150 Return the atanh of each data point of this Data object.
1151 *
1152 */
1153 ESCRIPT_DLL_API
1154 Data
1155 atanh() const;
1156
1157 /**
1158 \brief
1159 Return the log to base 10 of each data point of this Data object.
1160 *
1161 */
1162 ESCRIPT_DLL_API
1163 Data
1164 log10() const;
1165
1166 /**
1167 \brief
1168 Return the natural log of each data point of this Data object.
1169 *
1170 */
1171 ESCRIPT_DLL_API
1172 Data
1173 log() const;
1174
1175 /**
1176 \brief
1177 Return the exponential function of each data point of this Data object.
1178 *
1179 */
1180 ESCRIPT_DLL_API
1181 Data
1182 exp() const;
1183
1184 /**
1185 \brief
1186 Return the square root of each data point of this Data object.
1187 *
1188 */
1189 ESCRIPT_DLL_API
1190 Data
1191 sqrt() const;
1192
1193 /**
1194 \brief
1195 Return the negation of each data point of this Data object.
1196 *
1197 */
1198 ESCRIPT_DLL_API
1199 Data
1200 neg() const;
1201
1202 /**
1203 \brief
1204 Return the identity of each data point of this Data object.
1205 Simply returns this object unmodified.
1206 *
1207 */
1208 ESCRIPT_DLL_API
1209 Data
1210 pos() const;
1211
1212 /**
1213 \brief
1214 Return the given power of each data point of this Data object.
1215
1216 \param right Input - the power to raise the object to.
1217 *
1218 */
1219 ESCRIPT_DLL_API
1220 Data
1221 powD(const Data& right) const;
1222
1223 /**
1224 \brief
1225 Return each data point of this Data object raised to the power given by a boost python object.
1226
1227 \param right Input - the power to raise the object to.
1228 *
1229 */
1230 ESCRIPT_DLL_API
1231 Data
1232 powO(const boost::python::object& right) const;
1233
1234 /**
1235 \brief
1236 Return the given bases raised to the power of each data point of this Data object.
1237
1238 \param left Input - the bases
1239 *
1240 */
1241
1242 ESCRIPT_DLL_API
1243 Data
1244 rpowO(const boost::python::object& left) const;
1245
1246 /**
1247 \brief
1248 writes the object to a file in the DX file format
1249 */
1250 ESCRIPT_DLL_API
1251 void
1252 saveDX(std::string fileName) const;
1253
1254 /**
1255 \brief
1256 writes the object to a file in the VTK file format
1257 */
1258 ESCRIPT_DLL_API
1259 void
1260 saveVTK(std::string fileName) const;
1261
1262 /**
1263 \brief
1264 Overloaded operator +=
1265 \param right - Input - The right hand side.
1266 *
1267 */
1268 ESCRIPT_DLL_API
1269 Data& operator+=(const Data& right);
1270 ESCRIPT_DLL_API
1271 Data& operator+=(const boost::python::object& right);
1272
1273 ESCRIPT_DLL_API
1274 Data& operator=(const Data& other);
1275
1276 /**
1277 \brief
1278 Overloaded operator -=
1279 \param right - Input - The right hand side.
1280 *
1281 */
1282 ESCRIPT_DLL_API
1283 Data& operator-=(const Data& right);
1284 ESCRIPT_DLL_API
1285 Data& operator-=(const boost::python::object& right);
1286
1287 /**
1288 \brief
1289 Overloaded operator *=
1290 \param right - Input - The right hand side.
1291 *
1292 */
1293 ESCRIPT_DLL_API
1294 Data& operator*=(const Data& right);
1295 ESCRIPT_DLL_API
1296 Data& operator*=(const boost::python::object& right);
1297
1298 /**
1299 \brief
1300 Overloaded operator /=
1301 \param right - Input - The right hand side.
1302 *
1303 */
1304 ESCRIPT_DLL_API
1305 Data& operator/=(const Data& right);
1306 ESCRIPT_DLL_API
1307 Data& operator/=(const boost::python::object& right);
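
/*
   Illustrative sketch (editorial addition): the in-place operators above combine
   with the free operators declared at the end of this header.
   "fs" is an assumed FunctionSpace.

   \code
   Data a(1.0, DataTypes::ShapeType(), fs);
   Data b(2.0, DataTypes::ShapeType(), fs);
   a += b;            // a is now 3.0 everywhere
   a *= b;            // 6.0
   Data c = a / b;    // 3.0, uses operator/(const Data&, const Data&)
   \endcode
*/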
1308
1309 /**
1310 \brief
1311 Returns true if this can be interpolated to functionspace.
1312 */
1313 ESCRIPT_DLL_API
1314 bool
1315 probeInterpolation(const FunctionSpace& functionspace) const;
1316
1317 /**
1318 Data object slicing methods.
1319 */
1320
1321 /**
1322 \brief
1323 Returns a slice from this Data object.
1324
1325 Description:
1326 Implements the [] get operator in python.
1327 Calls getSlice.
1328
1329 \param key - Input - python slice tuple specifying
1330 slice to return.
1331 */
1332 ESCRIPT_DLL_API
1333 Data
1334 getItem(const boost::python::object& key) const;
1335
1336 /**
1337 \brief
1338 Copies slice from value into this Data object.
1339
1340 Implements the [] set operator in python.
1341 Calls setSlice.
1342
1343 \param key - Input - python slice tuple specifying
1344 slice to copy from value.
1345 \param value - Input - Data object to copy from.
1346 */
1347 ESCRIPT_DLL_API
1348 void
1349 setItemD(const boost::python::object& key,
1350 const Data& value);
1351
1352 ESCRIPT_DLL_API
1353 void
1354 setItemO(const boost::python::object& key,
1355 const boost::python::object& value);
1356
1357 // The following public methods should be treated as private.
1358
1359 /**
1360 \brief
1361 Perform the given unary operation on every element of every data point in
1362 this Data object.
1363 */
1364 template <class UnaryFunction>
1365 ESCRIPT_DLL_API
1366 inline
1367 void
1368 unaryOp2(UnaryFunction operation);
1369
1370 /**
1371 \brief
1372 Return a Data object containing the specified slice of
1373 this Data object.
1374 \param region - Input - Region to copy.
1375 *
1376 */
1377 ESCRIPT_DLL_API
1378 Data
1379 getSlice(const DataTypes::RegionType& region) const;
1380
1381 /**
1382 \brief
1383 Copy the specified slice from the given value into this
1384 Data object.
1385 \param value - Input - Data to copy from.
1386 \param region - Input - Region to copy.
1387 *
1388 */
1389 ESCRIPT_DLL_API
1390 void
1391 setSlice(const Data& value,
1392 const DataTypes::RegionType& region);
1393
1394 /**
1395 \brief
1396 print the data values to stdout. Used for debugging
1397 */
1398 ESCRIPT_DLL_API
1399 void
1400 print(void);
1401
1402 /**
1403 \brief
1404 return the MPI rank number of the local data
1405 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1406 is returned
1407 */
1408 ESCRIPT_DLL_API
1409 int
1410 get_MPIRank(void) const;
1411
1412 /**
1413 \brief
1414 return the MPI size (number of processes) for the local data
1415 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1416 is returned
1417 */
1418 ESCRIPT_DLL_API
1419 int
1420 get_MPISize(void) const;
1421
1422 /**
1423 \brief
1424 return the MPI communicator for the local data
1425 MPI_COMM_WORLD is assumed and returned.
1426 */
1427 ESCRIPT_DLL_API
1428 MPI_Comm
1429 get_MPIComm(void) const;
1430
1431 /**
1432 \brief
1433 return the object produced by the factory, which is a DataConstant or DataExpanded
1434 TODO Ownership of this object should be explained in doco.
1435 */
1436 ESCRIPT_DLL_API
1437 DataAbstract*
1438 borrowData(void) const;
1439
1440 ESCRIPT_DLL_API
1441 DataAbstract_ptr
1442 borrowDataPtr(void) const;
1443
1444 ESCRIPT_DLL_API
1445 DataReady_ptr
1446 borrowReadyPtr(void) const;
1447
1448
1449
1450 /**
1451 \brief
1452 Return a reference to the beginning of the datapoint at the specified offset.
1453 TODO Eventually these should be inlined.
1454 \param i - position(offset) in the underlying datastructure
1455 */
1456
1457 ESCRIPT_DLL_API
1458 DataTypes::ValueType::const_reference
1459 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1460
1461
1462 ESCRIPT_DLL_API
1463 DataTypes::ValueType::reference
1464 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1465
1466
1467
1468 /**
1469 \brief Create a buffer for use by getSample
1470 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1471 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1472
1473 In multi-threaded sections, this needs to be called on each thread.
1474
1475 \return A BufferGroup* if Data is lazy, NULL otherwise.
1476 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1477 */
1478 ESCRIPT_DLL_API
1479 BufferGroup*
1480 allocSampleBuffer() const;
1481
1482 /**
1483 \brief Free a buffer allocated with allocSampleBuffer.
1484 \param buffer Input - pointer to the buffer to deallocate.
1485 */
1486 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
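
/*
   Illustrative sketch (editorial addition): read-only access to possibly lazy data
   using a sample buffer, following the allocSampleBuffer/freeSampleBuffer contract above.

   \code
   BufferGroup* bg = d.allocSampleBuffer();        // NULL unless d is lazy
   for (int s = 0; s < d.getNumSamples(); ++s) {
       const double* p = d.getSampleDataRO(s, bg); // works for lazy and ready data
       // ... read the values of sample s through p ...
   }
   d.freeSampleBuffer(bg);                         // must be freed through this library
   \endcode
*/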
1487
1488 protected:
1489
1490 private:
1491
1492 double
1493 LsupWorker() const;
1494
1495 double
1496 supWorker() const;
1497
1498 double
1499 infWorker() const;
1500
1501 boost::python::object
1502 integrateWorker() const;
1503
1504 /**
1505 \brief
1506 Check *this and the right operand are compatible. Throws
1507 an exception if they aren't.
1508 \param right - Input - The right hand side.
1509 */
1510 inline
1511 void
1512 operandCheck(const Data& right) const
1513 {
1514 return m_data->operandCheck(*(right.m_data.get()));
1515 }
1516
1517 /**
1518 \brief
1519 Perform the specified reduction algorithm on every element of every data point in
1520 this Data object according to the given function and return the single value result.
1521 */
1522 template <class BinaryFunction>
1523 inline
1524 double
1525 algorithm(BinaryFunction operation,
1526 double initial_value) const;
1527
1528 /**
1529 \brief
1530 Reduce each data-point in this Data object using the given operation. Return a Data
1531 object with the same number of data-points, but with each data-point containing only
1532 one value - the result of the reduction operation on the corresponding data-point in
1533 this Data object
1534 */
1535 template <class BinaryFunction>
1536 inline
1537 Data
1538 dp_algorithm(BinaryFunction operation,
1539 double initial_value) const;
1540
1541 /**
1542 \brief
1543 Perform the given binary operation on all of the data's elements.
1544 The underlying type of the right hand side (right) determines the final
1545 type of *this after the operation. For example if the right hand side
1546 is expanded *this will be expanded if necessary.
1547 RHS is a Data object.
1548 */
1549 template <class BinaryFunction>
1550 inline
1551 void
1552 binaryOp(const Data& right,
1553 BinaryFunction operation);
1554
1555 /**
1556 \brief
1557 Convert the data type of the RHS to match this.
1558 \param right - Input - data type to match.
1559 */
1560 void
1561 typeMatchLeft(Data& right) const;
1562
1563 /**
1564 \brief
1565 Convert the data type of this to match the RHS.
1566 \param right - Input - data type to match.
1567 */
1568 void
1569 typeMatchRight(const Data& right);
1570
1571 /**
1572 \brief
1573 Construct a Data object of the appropriate type.
1574 */
1575
1576 void
1577 initialise(const DataTypes::ValueType& value,
1578 const DataTypes::ShapeType& shape,
1579 const FunctionSpace& what,
1580 bool expanded);
1581
1582 void
1583 initialise(const WrappedArray& value,
1584 const FunctionSpace& what,
1585 bool expanded);
1586
1587 //
1588 // flag to protect the data object against any update
1589 bool m_protected;
1590 mutable bool m_shared;
1591 bool m_lazy;
1592
1593 //
1594 // pointer to the actual data object
1595 // boost::shared_ptr<DataAbstract> m_data;
1596 DataAbstract_ptr m_data;
1597
1598 // If possible please use getReadyPtr instead.
1599 // But see warning below.
1600 const DataReady*
1601 getReady() const;
1602
1603 DataReady*
1604 getReady();
1605
1606
1607 // Be wary of using this for local operations since it (temporarily) increases reference count.
1608 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1609 // getReady() instead
1610 DataReady_ptr
1611 getReadyPtr();
1612
1613 const_DataReady_ptr
1614 getReadyPtr() const;
1615
1616
1617 /**
1618 \brief Update the Data's shared flag
1619 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1620 For internal use only.
1621 */
1622 void updateShareStatus(bool nowshared) const
1623 {
1624 m_shared=nowshared; // m_shared is mutable
1625 }
1626
1627 // In the isShared() method below:
1628 // A problem would occur if m_data (the address pointed to) were being modified
1629 // while the call m_data->is_shared is being executed.
1630 //
1631 // Q: So why do I think this code can be thread safe/correct?
1632 // A: We need to make some assumptions.
1633 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1634 // 2. We assume that no constructions or assignments which will share previously unshared data
1635 // will occur while this call is executing. This is consistent with the way the Data and C code are written.
1636 //
1637 // This means that the only transition we need to consider, is when a previously shared object is
1638 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1639 // In those cases the m_shared flag changes to false after m_data has completed changing.
1640 // For any threads executing before the flag switches they will assume the object is still shared.
1641 bool isShared() const
1642 {
1643 return m_shared;
1644 /* if (m_shared) return true;
1645 if (m_data->isShared())
1646 {
1647 updateShareStatus(true);
1648 return true;
1649 }
1650 return false;*/
1651 }
1652
1653 void forceResolve()
1654 {
1655 if (isLazy())
1656 {
1657 #ifdef _OPENMP
1658 if (omp_in_parallel())
1659 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1660 throw DataException("Please do not call forceResolve() in a parallel region.");
1661 }
1662 #endif
1663 resolve();
1664 }
1665 }
1666
1667 /**
1668 \brief If another object is sharing our member data, make a copy to work with instead.
1669 This code should only be called from single threaded sections of code.
1670 */
1671 void exclusiveWrite()
1672 {
1673 #ifdef _OPENMP
1674 if (omp_in_parallel())
1675 {
1676 // *((int*)0)=17;
1677 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1678 }
1679 #endif
1680 forceResolve();
1681 if (isShared())
1682 {
1683 DataAbstract* t=m_data->deepCopy();
1684 set_m_data(DataAbstract_ptr(t));
1685 }
1686 }
1687
1688 /**
1689 \brief checks if caller can have exclusive write to the object
1690 */
1691 void checkExclusiveWrite()
1692 {
1693 if (isLazy() || isShared())
1694 {
1695 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1696 }
1697 }
1698
1699 /**
1700 \brief Modify the data abstract hosted by this Data object
1701 For internal use only.
1702 Passing a pointer to null is permitted (do this in the destructor)
1703 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1704 */
1705 void set_m_data(DataAbstract_ptr p);
1706
1707 friend class DataAbstract; // To allow calls to updateShareStatus
1708
1709 };
1710
1711 } // end namespace escript
1712
1713
1714 // No, this is not supposed to be at the top of the file
1715 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1716 // so that I can dynamic cast between them below.
1717 #include "DataReady.h"
1718 #include "DataLazy.h"
1719
1720 namespace escript
1721 {
1722
1723 inline
1724 const DataReady*
1725 Data::getReady() const
1726 {
1727 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1728 EsysAssert((dr!=0), "Error - casting to DataReady.");
1729 return dr;
1730 }
1731
1732 inline
1733 DataReady*
1734 Data::getReady()
1735 {
1736 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1737 EsysAssert((dr!=0), "Error - casting to DataReady.");
1738 return dr;
1739 }
1740
1741 // Be wary of using this for local operations since it (temporarily) increases reference count.
1742 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1743 // getReady() instead
1744 inline
1745 DataReady_ptr
1746 Data::getReadyPtr()
1747 {
1748 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1749 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1750 return dr;
1751 }
1752
1753
1754 inline
1755 const_DataReady_ptr
1756 Data::getReadyPtr() const
1757 {
1758 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1759 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1760 return dr;
1761 }
1762
1763 inline
1764 DataAbstract::ValueType::value_type*
1765 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1766 {
1767 // if (isLazy())
1768 // {
1769 // resolve();
1770 // }
1771 // exclusiveWrite();
1772 if (isLazy())
1773 {
1774 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1775 }
1776 return getReady()->getSampleDataRW(sampleNo);
1777 }
1778
1779 // inline
1780 // const DataAbstract::ValueType::value_type*
1781 // Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer)
1782 // {
1783 // DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1784 // if (l!=0)
1785 // {
1786 // size_t offset=0;
1787 // if (buffer==NULL)
1788 // {
1789 // throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1790 // }
1791 // const DataTypes::ValueType* res=l->resolveSample(*buffer,0,sampleNo,offset);
1792 // return &((*res)[offset]);
1793 // }
1794 // return getReady()->getSampleDataRO(sampleNo);
1795 // }
1796
1797 inline
1798 const DataAbstract::ValueType::value_type*
1799 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1800 {
1801 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1802 if (l!=0)
1803 {
1804 size_t offset=0;
1805 if (bufferg==NULL)
1806 {
1807 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1808 }
1809 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1810 return &((*res)[offset]);
1811 }
1812 return getReady()->getSampleDataRO(sampleNo);
1813 }
1814
1815
1816
1817 /**
1818 Modify a filename for MPI parallel output to multiple files
1819 */
1820 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1821
1822 /**
1823 Binary Data object operators.
1824 */
1825 inline double rpow(double x,double y)
1826 {
1827 return pow(y,x);
1828 }
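
// Editorial note (illustrative): rpow swaps its arguments, e.g.
//   rpow(2.0, 3.0) == pow(3.0, 2.0) == 9.0
// which is presumably what backs the "reverse power" rpowO() member above.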
1829
1830 /**
1831 \brief
1832 Operator+
1833 Takes two Data objects.
1834 */
1835 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1836
1837 /**
1838 \brief
1839 Operator-
1840 Takes two Data objects.
1841 */
1842 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1843
1844 /**
1845 \brief
1846 Operator*
1847 Takes two Data objects.
1848 */
1849 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1850
1851 /**
1852 \brief
1853 Operator/
1854 Takes two Data objects.
1855 */
1856 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1857
1858 /**
1859 \brief
1860 Operator+
1861 Takes LHS Data object and RHS python::object.
1862 python::object must be convertible to Data type.
1863 */
1864 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1865
1866 /**
1867 \brief
1868 Operator-
1869 Takes LHS Data object and RHS python::object.
1870 python::object must be convertible to Data type.
1871 */
1872 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1873
1874 /**
1875 \brief
1876 Operator*
1877 Takes LHS Data object and RHS python::object.
1878 python::object must be convertible to Data type.
1879 */
1880 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1881
1882 /**
1883 \brief
1884 Operator/
1885 Takes LHS Data object and RHS python::object.
1886 python::object must be convertible to Data type.
1887 */
1888 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1889
1890 /**
1891 \brief
1892 Operator+
1893 Takes LHS python::object and RHS Data object.
1894 python::object must be convertible to Data type.
1895 */
1896 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1897
1898 /**
1899 \brief
1900 Operator-
1901 Takes LHS python::object and RHS Data object.
1902 python::object must be convertible to Data type.
1903 */
1904 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1905
1906 /**
1907 \brief
1908 Operator*
1909 Takes LHS python::object and RHS Data object.
1910 python::object must be convertible to Data type.
1911 */
1912 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1913
1914 /**
1915 \brief
1916 Operator/
1917 Takes LHS python::object and RHS Data object.
1918 python::object must be convertible to Data type.
1919 */
1920 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1921
1922
1923
1924 /**
1925 \brief
1926 Output operator
1927 */
1928 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1929
1930 /**
1931 \brief
1932 Compute a tensor product of two Data objects
1933 \param arg0 - Input - Data object
1934 \param arg1 - Input - Data object
1935 \param axis_offset - Input - axis offset
1936 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1937 */
1938 ESCRIPT_DLL_API
1939 Data
1940 C_GeneralTensorProduct(Data& arg0,
1941 Data& arg1,
1942 int axis_offset=0,
1943 int transpose=0);
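
/*
   Illustrative sketch (editorial addition): a matrix-vector style contraction with
   C_GeneralTensorProduct, assuming the usual convention that axis_offset axes are
   summed over. "A" (shape (3,3)) and "v" (shape (3,)) are assumed Data objects.

   \code
   Data Av = C_GeneralTensorProduct(A, v, 1, 0);   // contract one axis, no transpose -> shape (3,)
   \endcode
*/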
1944
1945 /**
1946 \brief
1947 Perform the given binary operation with this and right as operands.
1948 Right is a Data object.
1949 */
1950 template <class BinaryFunction>
1951 inline
1952 void
1953 Data::binaryOp(const Data& right,
1954 BinaryFunction operation)
1955 {
1956 //
1957 // updating a rank zero object with an object of higher rank is not permitted
1958 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1959 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1960 }
1961
1962 if (isLazy() || right.isLazy())
1963 {
1964 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1965 }
1966 //
1967 // initially make the temporary a shallow copy
1968 Data tempRight(right);
1969
1970 if (getFunctionSpace()!=right.getFunctionSpace()) {
1971 if (right.probeInterpolation(getFunctionSpace())) {
1972 //
1973 // an interpolation is required so create a new Data
1974 tempRight=Data(right,this->getFunctionSpace());
1975 } else if (probeInterpolation(right.getFunctionSpace())) {
1976 //
1977 // interpolate onto the RHS function space
1978 Data tempLeft(*this,right.getFunctionSpace());
1979 // m_data=tempLeft.m_data;
1980 set_m_data(tempLeft.m_data);
1981 }
1982 }
1983 operandCheck(tempRight);
1984 //
1985 // ensure this has the right type for the RHS
1986 typeMatchRight(tempRight);
1987 //
1988 // Need to cast to the concrete types so that the correct binaryOp
1989 // is called.
1990 if (isExpanded()) {
1991 //
1992 // Expanded data will be done in parallel, the right hand side can be
1993 // of any data type
1994 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
1995 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
1996 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
1997 } else if (isTagged()) {
1998 //
1999 // Tagged data is operated on serially, the right hand side can be
2000 // either DataConstant or DataTagged
2001 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2002 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2003 if (right.isTagged()) {
2004 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2005 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2006 escript::binaryOp(*leftC,*rightC,operation);
2007 } else {
2008 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2009 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2010 escript::binaryOp(*leftC,*rightC,operation);
2011 }
2012 } else if (isConstant()) {
2013 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2014 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2015 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2016 escript::binaryOp(*leftC,*rightC,operation);
2017 }
2018 }
2019
2020 /**
2021 \brief
2022 Perform the given Data object reduction algorithm on this and return the result.
2023 Given operation combines each element of each data point, thus argument
2024 object (*this) is a rank n Data object, and returned object is a scalar.
2025 Calls escript::algorithm.
2026 */
2027 template <class BinaryFunction>
2028 inline
2029 double
2030 Data::algorithm(BinaryFunction operation, double initial_value) const
2031 {
2032 if (isExpanded()) {
2033 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2034 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2035 return escript::algorithm(*leftC,operation,initial_value);
2036 } else if (isTagged()) {
2037 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2038 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2039 return escript::algorithm(*leftC,operation,initial_value);
2040 } else if (isConstant()) {
2041 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2042 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2043 return escript::algorithm(*leftC,operation,initial_value);
2044 } else if (isEmpty()) {
2045 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2046 } else if (isLazy()) {
2047 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2048 } else {
2049 throw DataException("Error - Data encapsulates an unknown type.");
2050 }
2051 }
2052
2053 /**
2054 \brief
2055 Perform the given data point reduction algorithm on data and return the result.
2056 Given operation combines each element within each data point into a scalar,
2057 thus argument object is a rank n Data object, and returned object is a
2058 rank 0 Data object.
2059 Calls escript::dp_algorithm.
2060 */
2061 template <class BinaryFunction>
2062 inline
2063 Data
2064 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2065 {
2066 if (isEmpty()) {
2067 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2068 }
2069 else if (isExpanded()) {
2070 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2071 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2072 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2073 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2074 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2075 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2076 return result;
2077 }
2078 else if (isTagged()) {
2079 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2080 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2081 DataTypes::ValueType defval(1);
2082 defval[0]=0;
2083 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2084 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2085 return Data(resultT); // note: the Data object now owns the resultT pointer
2086 }
2087 else if (isConstant()) {
2088 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2089 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2090 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2091 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2092 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2093 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2094 return result;
2095 } else if (isLazy()) {
2096 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2097 } else {
2098 throw DataException("Error - Data encapsulates an unknown type.");
2099 }
2100 }
2101
2102 /**
2103 \brief
2104 Compute a tensor operation with two Data objects
2105 \param arg0 - Input - Data object
2106 \param arg1 - Input - Data object
2107 \param operation - Input - Binary op functor
2108 */
2109 template <typename BinaryFunction>
2110 inline
2111 Data
2112 C_TensorBinaryOperation(Data const &arg_0,
2113 Data const &arg_1,
2114 BinaryFunction operation)
2115 {
2116 if (arg_0.isEmpty() || arg_1.isEmpty())
2117 {
2118 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2119 }
2120 if (arg_0.isLazy() || arg_1.isLazy())
2121 {
2122 throw DataException("Error - Operations not permitted on lazy data.");
2123 }
2124 // Interpolate if necessary and find an appropriate function space
2125 Data arg_0_Z, arg_1_Z;
2126 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2127 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2128 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2129 arg_1_Z = Data(arg_1);
2130 }
2131 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2132 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2133 arg_0_Z =Data(arg_0);
2134 }
2135 else {
2136 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2137 }
2138 } else {
2139 arg_0_Z = Data(arg_0);
2140 arg_1_Z = Data(arg_1);
2141 }
2142 // Get rank and shape of inputs
2143 int rank0 = arg_0_Z.getDataPointRank();
2144 int rank1 = arg_1_Z.getDataPointRank();
2145 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2146 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2147 int size0 = arg_0_Z.getDataPointSize();
2148 int size1 = arg_1_Z.getDataPointSize();
2149 // Declare output Data object
2150 Data res;
2151
2152 if (shape0 == shape1) {
2153 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2154 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2155 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2156 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2157 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2158
2159 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2160 }
2161 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2162
2163 // Prepare the DataConstant input
2164 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2165
2166 // Borrow DataTagged input from Data object
2167 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2168
2169 // Prepare a DataTagged output 2
2170 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2171 res.tag();
2172 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2173
2174 // Prepare offset into DataConstant
2175 int offset_0 = tmp_0->getPointOffset(0,0);
2176 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2177
2178 // Get the pointers to the actual data
2179 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2180 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2181
2182 // Compute a result for the default
2183 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2184 // Compute a result for each tag
2185 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2186 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2187 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2188 tmp_2->addTag(i->first);
2189 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2190 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2191
2192 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2193 }
2194
2195 }
2196 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2197 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2198 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2199 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2200 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2201
2202 int sampleNo_1,dataPointNo_1;
2203 int numSamples_1 = arg_1_Z.getNumSamples();
2204 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2205 int offset_0 = tmp_0->getPointOffset(0,0);
2206 res.requireWrite();
2207 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2208 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2209 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2210 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2211 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2212 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2213 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2214 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2215 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2216 }
2217 }
2218
2219 }
2220 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2221 // Borrow DataTagged input from Data object
2222 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2223
2224 // Prepare the DataConstant input
2225 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2226
2227 // Prepare a DataTagged output 2
2228 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2229 res.tag();
2230 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2231
2232 // Prepare offset into DataConstant
2233 int offset_1 = tmp_1->getPointOffset(0,0);
2234
2235 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2236 // Get the pointers to the actual data
2237 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2238 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2239 // Compute a result for the default
2240 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2241 // Compute a result for each tag
2242 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2243 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2244 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2245 tmp_2->addTag(i->first);
2246 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2247 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2248 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2249 }
2250
2251 }
2252 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2253 // Borrow DataTagged input from Data object
2254 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2255
2256 // Borrow DataTagged input from Data object
2257 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2258
2259 // Prepare a DataTagged output 2
2260 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2261 res.tag(); // DataTagged output
2262 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2263
2264 // Get the pointers to the actual data
2265 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2266 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2267 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2268
2269 // Compute a result for the default
2270 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2271 // Merge the tags
2272 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2273 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2274 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2275 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2276 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2277 }
2278 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2279 tmp_2->addTag(i->first);
2280 }
2281 // Compute a result for each tag
2282 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2283 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2284
2285 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2286 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2287 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2288
2289 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2290 }
2291
2292 }
2293 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2294 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2295 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2296 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2297 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2298 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2299
2300 int sampleNo_0,dataPointNo_0;
2301 int numSamples_0 = arg_0_Z.getNumSamples();
2302 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2303 res.requireWrite();
2304 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2305 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2306 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2307 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2308 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2309 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2310 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2311 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2312 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2313 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2314 }
2315 }
2316
2317 }
2318 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2319 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2320 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2321 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2322 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2323
2324 int sampleNo_0,dataPointNo_0;
2325 int numSamples_0 = arg_0_Z.getNumSamples();
2326 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2327 int offset_1 = tmp_1->getPointOffset(0,0);
2328 res.requireWrite();
2329 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2330 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2331 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2332 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2333 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2334
2335 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2336 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2337 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2338
2339
2340 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2341 }
2342 }
2343
2344 }
2345 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2346 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2347 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2348 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2349 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2350 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2351
2352 int sampleNo_0,dataPointNo_0;
2353 int numSamples_0 = arg_0_Z.getNumSamples();
2354 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2355 res.requireWrite();
2356 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2357 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2358 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2359 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2360 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2361 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2362 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2363 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2364 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2365 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2366 }
2367 }
2368
2369 }
2370 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2371 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2372 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2373 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2374 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2375 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2376
2377 int sampleNo_0,dataPointNo_0;
2378 int numSamples_0 = arg_0_Z.getNumSamples();
2379 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2380 res.requireWrite();
2381 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2382 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2383 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2384 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2385 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2386 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2387 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2388 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2389 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2390 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2391 }
2392 }
2393
2394 }
2395 else {
2396 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2397 }
2398
2399 } else if (0 == rank0) {
2400 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2401 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2402 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2403 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2404 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2405 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2406 }
2407 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2408
2409 // Prepare the DataConstant input
2410 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2411
2412 // Borrow DataTagged input from Data object
2413 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2414
2415 // Prepare a DataTagged output 2
2416 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2417 res.tag();
2418 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2419
2420 // Prepare offset into DataConstant
2421 int offset_0 = tmp_0->getPointOffset(0,0);
2422 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2423
2424 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2425 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2426
2427 // Compute a result for the default
2428 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2429 // Compute a result for each tag
2430 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2431 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2432 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2433 tmp_2->addTag(i->first);
2434 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2435 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2436 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2437 }
2438
2439 }
2440 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2441
2442 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2443 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2444 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2445 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2446
2447 int sampleNo_1,dataPointNo_1;
2448 int numSamples_1 = arg_1_Z.getNumSamples();
2449 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2450 int offset_0 = tmp_0->getPointOffset(0,0);
2451 res.requireWrite();
2452 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2453 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2454 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2455 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2456 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2457 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2458 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2459 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2460 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2461
2462 }
2463 }
2464
2465 }
2466 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2467
2468 // Borrow DataTagged input from Data object
2469 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2470
2471 // Prepare the DataConstant input
2472 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2473
2474 // Prepare a DataTagged output 2
2475 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2476 res.tag();
2477 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2478
2479 // Prepare offset into DataConstant
2480 int offset_1 = tmp_1->getPointOffset(0,0);
2481 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2482
2483 // Get the pointers to the actual data
2484 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2485 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2486
2487
2488 // Compute a result for the default
2489 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2490 // Compute a result for each tag
2491 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2492 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2493 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2494 tmp_2->addTag(i->first);
2495 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2496 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2497
2498 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2499 }
2500
2501 }
2502 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2503
2504 // Borrow DataTagged input from Data object
2505 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2506
2507 // Borrow DataTagged input from Data object
2508 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2509
2510 // Prepare a DataTagged output 2
2511 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2512 res.tag(); // DataTagged output
2513 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2514
2515 // Get the pointers to the actual data
2516 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2517 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2518 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2519
2520 // Compute a result for the default
2521 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2522 // Merge the tags
2523 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2524 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2525 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2526 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2527 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2528 }
2529 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2530 tmp_2->addTag(i->first);
2531 }
2532 // Compute a result for each tag
2533 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2534 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2535 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2536 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2537 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2538
2539 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2540 }
2541
2542 }
2543 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2544
2545 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2546 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2547 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2548 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2549 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2550
2551 int sampleNo_0,dataPointNo_0;
2552 int numSamples_0 = arg_0_Z.getNumSamples();
2553 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2554 res.requireWrite();
2555 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2556 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2557 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2558 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2559 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2560 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2561 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2562 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2563 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2564 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2565 }
2566 }
2567
2568 }
2569 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2570 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2571 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2572 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2573 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2574
2575 int sampleNo_0,dataPointNo_0;
2576 int numSamples_0 = arg_0_Z.getNumSamples();
2577 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2578 int offset_1 = tmp_1->getPointOffset(0,0);
2579 res.requireWrite();
2580 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2581 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2582 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2583 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2584 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2585 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2586 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2587 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2588 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2589 }
2590 }
2591
2592
2593 }
2594 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2595
2596 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2597 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2598 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2599 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2600 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2601
2602 int sampleNo_0,dataPointNo_0;
2603 int numSamples_0 = arg_0_Z.getNumSamples();
2604 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2605 res.requireWrite();
2606 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2607 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2608 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2609 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2610 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2611 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2612 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2613 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2614 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2615 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2616 }
2617 }
2618
2619 }
2620 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2621
2622 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2623 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2624 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2625 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2626 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2627
2628 int sampleNo_0,dataPointNo_0;
2629 int numSamples_0 = arg_0_Z.getNumSamples();
2630 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2631 res.requireWrite();
2632 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2633 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2634 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2635 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2636 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2637 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2638 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2639 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2640 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2641 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2642 }
2643 }
2644
2645 }
2646 else {
2647 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2648 }
2649
2650 } else if (0 == rank1) {
2651 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2652 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2653 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2654 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2655 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2656 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2657 }
2658 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2659
2660 // Prepare the DataConstant input
2661 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2662
2663 // Borrow DataTagged input from Data object
2664 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2665
2666 // Prepare a DataTagged output 2
2667 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2668 res.tag();
2669 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2670
2671 // Prepare offset into DataConstant
2672 int offset_0 = tmp_0->getPointOffset(0,0);
2673 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2674
2675 // Get the pointers to the actual data
2676 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2677 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2678
2679 // Compute a result for the default
2680 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2681 // Compute a result for each tag
2682 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2683 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2684 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2685 tmp_2->addTag(i->first);
2686 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2687 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2688 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2689 }
2690 }
2691 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2692
2693 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2694 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2695 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2696 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2697
2698 int sampleNo_1,dataPointNo_1;
2699 int numSamples_1 = arg_1_Z.getNumSamples();
2700 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2701 int offset_0 = tmp_0->getPointOffset(0,0);
2702 res.requireWrite();
2703 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2704 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2705 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2706 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2707 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2708 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2709 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2710 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2711 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2712 }
2713 }
2714
2715 }
2716 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2717
2718 // Borrow DataTagged input from Data object
2719 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2720
2721 // Prepare the DataConstant input
2722 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2723
2724 // Prepare a DataTagged output 2
2725 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2726 res.tag();
2727 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2728
2729 // Prepare offset into DataConstant
2730 int offset_1 = tmp_1->getPointOffset(0,0);
2731 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2732 // Get the pointers to the actual data
2733 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2734 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2735 // Compute a result for the default
2736 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2737 // Compute a result for each tag
2738 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2739 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2740 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2741 tmp_2->addTag(i->first);
2742 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2743 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2744 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2745 }
2746
2747 }
2748 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2749
2750 // Borrow DataTagged input from Data object
2751 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2752
2753 // Borrow DataTagged input from Data object
2754 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2755
2756 // Prepare a DataTagged output 2
2757 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2758 res.tag(); // DataTagged output
2759 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2760
2761 // Get the pointers to the actual data
2762 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2763 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2764 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2765
2766 // Compute a result for the default
2767 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2768 // Merge the tags
2769 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2770 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2771 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2772 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2773 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2774 }
2775 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2776 tmp_2->addTag(i->first);
2777 }
2778 // Compute a result for each tag
2779 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2780 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2781 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2782 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2783 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2784 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2785 }
2786
2787 }
2788 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2789
2790 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2791 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2792 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2793 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2794 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2795
2796 int sampleNo_0,dataPointNo_0;
2797 int numSamples_0 = arg_0_Z.getNumSamples();
2798 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2799 res.requireWrite();
2800 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2801 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2802 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2803 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2804 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2805 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2806 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2807 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2808 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2809 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2810 }
2811 }
2812
2813 }
2814 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2815 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2816 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2817 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2818 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2819
2820 int sampleNo_0,dataPointNo_0;
2821 int numSamples_0 = arg_0_Z.getNumSamples();
2822 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2823 int offset_1 = tmp_1->getPointOffset(0,0);
2824 res.requireWrite();
2825 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2826 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2827 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2828 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2829 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2830 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2831 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2832 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2833 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2834 }
2835 }
2836
2837
2838 }
2839 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2840
2841 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2842 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2843 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2844 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2845 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2846
2847 int sampleNo_0,dataPointNo_0;
2848 int numSamples_0 = arg_0_Z.getNumSamples();
2849 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2850 res.requireWrite();
2851 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2852 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2853 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2854 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2855 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2856 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2857 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2858 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2859 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2860 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2861 }
2862 }
2863
2864 }
2865 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2866
2867 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2868 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2869 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2870 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2871 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2872
2873 int sampleNo_0,dataPointNo_0;
2874 int numSamples_0 = arg_0_Z.getNumSamples();
2875 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2876 res.requireWrite();
2877 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2878 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2879 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2880 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2881 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2882 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2883 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2884 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2885 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2886 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2887 }
2888 }
2889
2890 }
2891 else {
2892 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2893 }
2894
2895 } else {
2896 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2897 }
2898
2899 return res;
2900 }
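/*
  Usage sketch for C_TensorBinaryOperation (illustrative only; the Data objects
  "a" and "b" and the helper function below are hypothetical). The functor is
  applied datapoint by datapoint once both arguments share a FunctionSpace, so a
  pointwise sum of two Data objects could look like:

      #include <functional>

      escript::Data pointwiseSum(const escript::Data& a, const escript::Data& b)
      {
          // std::plus<double> supplies operator()(double,double), which satisfies
          // the BinaryFunction requirement of C_TensorBinaryOperation.
          return escript::C_TensorBinaryOperation(a, b, std::plus<double>());
      }

  The storage type of the result (DataConstant, DataTagged or DataExpanded)
  follows from the storage types of the inputs, as handled by the branches above.
*/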
2901
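/**
\brief
Compute a pointwise tensor (unary) operation applied to a single Data object.
\param arg_0 - Input - Data object
\param operation - Input - Unary operation functor applied to each datapoint value
*/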
2902 template <typename UnaryFunction>
2903 Data
2904 C_TensorUnaryOperation(Data const &arg_0,
2905 UnaryFunction operation)
2906 {
2907 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2908 {
2909 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2910 }
2911 if (arg_0.isLazy())
2912 {
2913 throw DataException("Error - Operations not permitted on lazy data.");
2914 }
2915 // A unary operation has nothing to interpolate against; just take a shallow copy of the input
2916 Data arg_0_Z = Data(arg_0);
2917
2918 // Get the shape and datapoint size of the input
2919 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2920 int size0 = arg_0_Z.getDataPointSize();
2921
2922 // Declare output Data object
2923 Data res;
2924
2925 if (arg_0_Z.isConstant()) {
2926 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2927 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2928 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2929 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2930 }
2931 else if (arg_0_Z.isTagged()) {
2932
2933 // Borrow DataTagged input from Data object
2934 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2935
2936 // Prepare a DataTagged output 2
2937 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2938 res.tag();
2939 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2940
2941 // Get the pointers to the actual data
2942 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2943 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2944 // Compute a result for the default
2945 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2946 // Compute a result for each tag
2947 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2948 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2949 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2950 tmp_2->addTag(i->first);
2951 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2952 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2953 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2954 }
2955
2956 }
2957 else if (arg_0_Z.isExpanded()) {
2958
2959 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2960 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2961 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2962
2963 int sampleNo_0,dataPointNo_0;
2964 int numSamples_0 = arg_0_Z.getNumSamples();
2965 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
     res.requireWrite();  // ensure exclusive, writable storage before the parallel write (as done in C_TensorBinaryOperation)
2966 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2967 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2968 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2969 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2970 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2971 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2972 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2973 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2974 }
2975 }
2976 }
2977 else {
2978 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
2979 }
2980
2981 return res;
2982 }
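/*
  Usage sketch for C_TensorUnaryOperation (illustrative only; "d" and the helper
  function below are hypothetical). Any callable taking and returning a double
  may be passed, for example a C library function cast to Data::UnaryDFunPtr:

      #include <cmath>

      escript::Data pointwiseSine(const escript::Data& d)
      {
          // ::sin is cast to double (*)(double) to select the double overload.
          return escript::C_TensorUnaryOperation(d, (escript::Data::UnaryDFunPtr)::sin);
      }
*/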
2983
2984 }
2985 #endif

Properties

Name           Value
svn:eol-style  native
svn:keywords   Author Date Id Revision
