
Contents of /trunk/escript/src/Data.h



Revision 2476
Wed Jun 17 04:42:13 2009 UTC by jfenwick
File MIME type: text/plain
File size: 92132 byte(s)
Added maxGlobalDataPoint.
Not currently unit tested.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData onto "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
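/*
  Example (a minimal sketch): constructing Data with a given point shape. The
  FunctionSpace "fs" is assumed to come from a domain created elsewhere; all names
  below are illustrative only, not part of this interface.

      void buildExample(const escript::FunctionSpace& fs)
      {
          escript::DataTypes::ShapeType shape;
          shape.push_back(3);                       // each data point is a vector of 3 doubles
          escript::Data c(1.5, shape, fs, false);   // constant storage, one copy of the value
          escript::Data e(1.5, shape, fs, true);    // expanded storage, value repeated per point
      }
*/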
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
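/*
  Example (a minimal sketch): the lazy-evaluation round trip. "d" is assumed to be an
  existing escript::Data object; the names are illustrative only.

      escript::Data lazy = d.delay();   // a deferred-evaluation version of d
      d.delaySelf();                    // or convert d itself to lazy form in place
      d.resolve();                      // force evaluation back to ready data when values are needed
*/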
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true, if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type, convert this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 Which type of ready data results depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
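/*
  Example (a minimal sketch): the intended requireWrite()/getSampleDataRW() call pattern.
  "d" is assumed to be an existing, writable escript::Data object; names are illustrative only.

      d.requireWrite();                        // single threaded: resolve and un-share first
      #pragma omp parallel for
      for (int s=0; s<d.getNumSamples(); ++s)
      {
          double* p=d.getSampleDataRW(s);      // per-sample write access is now safe in parallel
          p[0]+=1.0;                           // modify the first value of each sample
      }
*/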
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for lazy Data.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - Input - the group from which a buffer to compute (and store) the sample data will be selected.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687
688
689 /**
690 \brief
691 Assign the given value to the tag associated with name. Implicitly converts this
692 object to type DataTagged. Throws an exception if this object
693 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
694 \param name - Input - the name associated with the tag.
695 \param value - Input - Value to associate with the named tag.
696 *
697 */
698 ESCRIPT_DLL_API
699 void
700 setTaggedValueByName(std::string name,
701 const boost::python::object& value);
702
703 /**
704 \brief
705 Assign the given value to the tag. Implicitly converts this
706 object to type DataTagged if it is constant.
707
708 \param tagKey - Input - Integer key.
709 \param value - Input - Value to associate with given key.
710 *
711 */
712 ESCRIPT_DLL_API
713 void
714 setTaggedValue(int tagKey,
715 const boost::python::object& value);
716
717 /**
718 \brief
719 Assign the given value to the tag. Implicitly converts this
720 object to type DataTagged if it is constant.
721
722 \param tagKey - Input - Integer key.
723 \param pointshape - Input - The shape of the value parameter
724 \param value - Input - Value to associate with given key.
725 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
726 */
727 ESCRIPT_DLL_API
728 void
729 setTaggedValueFromCPP(int tagKey,
730 const DataTypes::ShapeType& pointshape,
731 const DataTypes::ValueType& value,
732 int dataOffset=0);
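/*
  Example (a minimal sketch): tagging from C++. "d" is assumed to be a scalar, constant
  escript::Data object and tag key 1 is assumed to be known to the domain; names are
  illustrative only.

      d.tag();                                  // convert constant data to tagged
      escript::DataTypes::ValueType v(1);
      v[0]=42.0;                                // one value, matching the scalar shape
      d.setTaggedValueFromCPP(1, escript::DataTypes::scalarShape, v, 0);
*/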
733
734
735
736 /**
737 \brief
738 Copy other Data object into this Data object where mask is positive.
739 */
740 ESCRIPT_DLL_API
741 void
742 copyWithMask(const Data& other,
743 const Data& mask);
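/*
  Example (a minimal sketch): masked assignment. "target", "source" and "level" are
  assumed to be compatible escript::Data objects; names are illustrative only.

      escript::Data mask=level.wherePositive();   // 1 where level>0, 0 elsewhere
      target.copyWithMask(source, mask);          // overwrite target by source only where mask is positive
*/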
744
745 /**
746 Data object operation methods and operators.
747 */
748
749 /**
750 \brief
751 set all values to zero
752 *
753 */
754 ESCRIPT_DLL_API
755 void
756 setToZero();
757
758 /**
759 \brief
760 Interpolates this onto the given functionspace and returns
761 the result as a Data object.
762 *
763 */
764 ESCRIPT_DLL_API
765 Data
766 interpolate(const FunctionSpace& functionspace) const;
767 /**
768 \brief
769 Calculates the gradient of the data at the data points of functionspace.
770 If functionspace is not present the function space of Function(getDomain()) is used.
771 *
772 */
773 ESCRIPT_DLL_API
774 Data
775 gradOn(const FunctionSpace& functionspace) const;
776
777 ESCRIPT_DLL_API
778 Data
779 grad() const;
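/*
  Example (a minimal sketch): interpolation and gradients. "u" is assumed to be an
  existing escript::Data object and "fs" a FunctionSpace it can be interpolated to;
  names are illustrative only.

      escript::Data u_fs=u.interpolate(fs);   // u represented on another function space
      escript::Data g=u.grad();               // gradient, evaluated on Function(getDomain())
      escript::Data gfs=u.gradOn(fs);         // gradient evaluated on a chosen function space
*/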
780
781 /**
782 \brief
783 Calculate the integral over the function space domain as a python tuple.
784 */
785 ESCRIPT_DLL_API
786 boost::python::object
787 integrateToTuple_const() const;
788
789
790 /**
791 \brief
792 Calculate the integral over the function space domain as a python tuple.
793 */
794 ESCRIPT_DLL_API
795 boost::python::object
796 integrateToTuple();
797
798
799
800 /**
801 \brief
802 Returns 1./ Data object
803 *
804 */
805 ESCRIPT_DLL_API
806 Data
807 oneOver() const;
808 /**
809 \brief
810 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
811 *
812 */
813 ESCRIPT_DLL_API
814 Data
815 wherePositive() const;
816
817 /**
818 \brief
819 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
820 *
821 */
822 ESCRIPT_DLL_API
823 Data
824 whereNegative() const;
825
826 /**
827 \brief
828 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
829 *
830 */
831 ESCRIPT_DLL_API
832 Data
833 whereNonNegative() const;
834
835 /**
836 \brief
837 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
838 *
839 */
840 ESCRIPT_DLL_API
841 Data
842 whereNonPositive() const;
843
844 /**
845 \brief
846 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
847 *
848 */
849 ESCRIPT_DLL_API
850 Data
851 whereZero(double tol=0.0) const;
852
853 /**
854 \brief
855 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
856 *
857 */
858 ESCRIPT_DLL_API
859 Data
860 whereNonZero(double tol=0.0) const;
861
862 /**
863 \brief
864 Return the maximum absolute value of this Data object.
865
866 The method is not const because lazy data needs to be expanded before Lsup can be computed.
867 The _const form can be used when the Data object is const, however this will only work for
868 Data which is not Lazy.
869
870 For Data which contain no samples (or tagged Data for which no tags in use have a value)
871 zero is returned.
872 */
873 ESCRIPT_DLL_API
874 double
875 Lsup();
876
877 ESCRIPT_DLL_API
878 double
879 Lsup_const() const;
880
881
882 /**
883 \brief
884 Return the maximum value of this Data object.
885
886 The method is not const because lazy data needs to be expanded before sup can be computed.
887 The _const form can be used when the Data object is const, however this will only work for
888 Data which is not Lazy.
889
890 For Data which contain no samples (or tagged Data for which no tags in use have a value)
891 a large negative value is returned.
892 */
893 ESCRIPT_DLL_API
894 double
895 sup();
896
897 ESCRIPT_DLL_API
898 double
899 sup_const() const;
900
901
902 /**
903 \brief
904 Return the minimum value of this Data object.
905
906 The method is not const because lazy data needs to be expanded before inf can be computed.
907 The _const form can be used when the Data object is const, however this will only work for
908 Data which is not Lazy.
909
910 For Data which contain no samples (or tagged Data for which no tags in use have a value)
911 a large positive value is returned.
912 */
913 ESCRIPT_DLL_API
914 double
915 inf();
916
917 ESCRIPT_DLL_API
918 double
919 inf_const() const;
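/*
  Example (a minimal sketch): global reductions. "d" is assumed to be an existing,
  non-lazy escript::Data object; names are illustrative only.

      double a=d.Lsup();   // largest absolute value over all data points (all MPI ranks)
      double b=d.sup();    // largest value
      double c=d.inf();    // smallest value
*/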
920
921
922
923 /**
924 \brief
925 Return the absolute value of each data point of this Data object.
926 *
927 */
928 ESCRIPT_DLL_API
929 Data
930 abs() const;
931
932 /**
933 \brief
934 Return the maximum value of each data point of this Data object.
935 *
936 */
937 ESCRIPT_DLL_API
938 Data
939 maxval() const;
940
941 /**
942 \brief
943 Return the minimum value of each data point of this Data object.
944 *
945 */
946 ESCRIPT_DLL_API
947 Data
948 minval() const;
949
950 /**
951 \brief
952 Return the (sample number, data-point number) of the data point with
953 the minimum component value in this Data object.
954 \note If you are working in python, please consider using Locator
955 instead of manually manipulating process and point IDs.
956 */
957 ESCRIPT_DLL_API
958 const boost::python::tuple
959 minGlobalDataPoint() const;
960
961 /**
962 \brief
963 Return the (sample number, data-point number) of the data point with
964 the minimum component value in this Data object.
965 \note If you are working in python, please consider using Locator
966 instead of manually manipulating process and point IDs.
967 */
968 ESCRIPT_DLL_API
969 const boost::python::tuple
970 maxGlobalDataPoint() const;
971
972
973
974 /**
975 \brief
976 Return the sign of each data point of this Data object.
977 -1 for negative values, zero for zero values, 1 for positive values.
978 *
979 */
980 ESCRIPT_DLL_API
981 Data
982 sign() const;
983
984 /**
985 \brief
986 Return the symmetric part of a matrix which is half the matrix plus its transpose.
987 *
988 */
989 ESCRIPT_DLL_API
990 Data
991 symmetric() const;
992
993 /**
994 \brief
995 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
996 *
997 */
998 ESCRIPT_DLL_API
999 Data
1000 nonsymmetric() const;
1001
1002 /**
1003 \brief
1004 Return the trace of a matrix
1005 *
1006 */
1007 ESCRIPT_DLL_API
1008 Data
1009 trace(int axis_offset) const;
1010
1011 /**
1012 \brief
1013 Transpose each data point of this Data object around the given axis.
1014 *
1015 */
1016 ESCRIPT_DLL_API
1017 Data
1018 transpose(int axis_offset) const;
1019
1020 /**
1021 \brief
1022 Return the eigenvalues of the symmetric part at each data point of this Data object, in increasing order.
1023 Currently this function is restricted to rank 2, square shape, and dimension 3.
1024 *
1025 */
1026 ESCRIPT_DLL_API
1027 Data
1028 eigenvalues() const;
1029
1030 /**
1031 \brief
1032 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1033 The eigenvalues are ordered by increasing size, where eigenvalues with relative difference less than
1034 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1035 first non-zero entry is positive.
1036 Currently this function is restricted to rank 2, square shape, and dimension 3.
1037 *
1038 */
1039 ESCRIPT_DLL_API
1040 const boost::python::tuple
1041 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1042
1043 /**
1044 \brief
1045 swaps the components axis0 and axis1
1046 *
1047 */
1048 ESCRIPT_DLL_API
1049 Data
1050 swapaxes(const int axis0, const int axis1) const;
1051
1052 /**
1053 \brief
1054 Return the error function erf of each data point of this Data object.
1055 *
1056 */
1057 ESCRIPT_DLL_API
1058 Data
1059 erf() const;
1060
1061 /**
1062 \brief
1063 Return the sin of each data point of this Data object.
1064 *
1065 */
1066 ESCRIPT_DLL_API
1067 Data
1068 sin() const;
1069
1070 /**
1071 \brief
1072 Return the cos of each data point of this Data object.
1073 *
1074 */
1075 ESCRIPT_DLL_API
1076 Data
1077 cos() const;
1078
1079 /**
1080 \brief
1081 Return the tan of each data point of this Data object.
1082 *
1083 */
1084 ESCRIPT_DLL_API
1085 Data
1086 tan() const;
1087
1088 /**
1089 \brief
1090 Return the asin of each data point of this Data object.
1091 *
1092 */
1093 ESCRIPT_DLL_API
1094 Data
1095 asin() const;
1096
1097 /**
1098 \brief
1099 Return the acos of each data point of this Data object.
1100 *
1101 */
1102 ESCRIPT_DLL_API
1103 Data
1104 acos() const;
1105
1106 /**
1107 \brief
1108 Return the atan of each data point of this Data object.
1109 *
1110 */
1111 ESCRIPT_DLL_API
1112 Data
1113 atan() const;
1114
1115 /**
1116 \brief
1117 Return the sinh of each data point of this Data object.
1118 *
1119 */
1120 ESCRIPT_DLL_API
1121 Data
1122 sinh() const;
1123
1124 /**
1125 \brief
1126 Return the cosh of each data point of this Data object.
1127 *
1128 */
1129 ESCRIPT_DLL_API
1130 Data
1131 cosh() const;
1132
1133 /**
1134 \brief
1135 Return the tanh of each data point of this Data object.
1136 *
1137 */
1138 ESCRIPT_DLL_API
1139 Data
1140 tanh() const;
1141
1142 /**
1143 \brief
1144 Return the asinh of each data point of this Data object.
1145 *
1146 */
1147 ESCRIPT_DLL_API
1148 Data
1149 asinh() const;
1150
1151 /**
1152 \brief
1153 Return the acosh of each data point of this Data object.
1154 *
1155 */
1156 ESCRIPT_DLL_API
1157 Data
1158 acosh() const;
1159
1160 /**
1161 \brief
1162 Return the atanh of each data point of this Data object.
1163 *
1164 */
1165 ESCRIPT_DLL_API
1166 Data
1167 atanh() const;
1168
1169 /**
1170 \brief
1171 Return the log to base 10 of each data point of this Data object.
1172 *
1173 */
1174 ESCRIPT_DLL_API
1175 Data
1176 log10() const;
1177
1178 /**
1179 \brief
1180 Return the natural log of each data point of this Data object.
1181 *
1182 */
1183 ESCRIPT_DLL_API
1184 Data
1185 log() const;
1186
1187 /**
1188 \brief
1189 Return the exponential function of each data point of this Data object.
1190 *
1191 */
1192 ESCRIPT_DLL_API
1193 Data
1194 exp() const;
1195
1196 /**
1197 \brief
1198 Return the square root of each data point of this Data object.
1199 *
1200 */
1201 ESCRIPT_DLL_API
1202 Data
1203 sqrt() const;
1204
1205 /**
1206 \brief
1207 Return the negation of each data point of this Data object.
1208 *
1209 */
1210 ESCRIPT_DLL_API
1211 Data
1212 neg() const;
1213
1214 /**
1215 \brief
1216 Return the identity of each data point of this Data object.
1217 Simply returns this object unmodified.
1218 *
1219 */
1220 ESCRIPT_DLL_API
1221 Data
1222 pos() const;
1223
1224 /**
1225 \brief
1226 Return the given power of each data point of this Data object.
1227
1228 \param right Input - the power to raise the object to.
1229 *
1230 */
1231 ESCRIPT_DLL_API
1232 Data
1233 powD(const Data& right) const;
1234
1235 /**
1236 \brief
1237 Return each data point of this Data object raised to the power given by a boost python object.
1238
1239 \param right Input - the power to raise the object to.
1240 *
1241 */
1242 ESCRIPT_DLL_API
1243 Data
1244 powO(const boost::python::object& right) const;
1245
1246 /**
1247 \brief
1248 Return the given boost python object raised to the power of each data point of this Data object.
1249
1250 \param left Input - the base values.
1251 *
1252 */
1253
1254 ESCRIPT_DLL_API
1255 Data
1256 rpowO(const boost::python::object& left) const;
1257
1258 /**
1259 \brief
1260 writes the object to a file in the DX file format
1261 */
1262 ESCRIPT_DLL_API
1263 void
1264 saveDX(std::string fileName) const;
1265
1266 /**
1267 \brief
1268 writes the object to a file in the VTK file format
1269 */
1270 ESCRIPT_DLL_API
1271 void
1272 saveVTK(std::string fileName) const;
1273
1274 /**
1275 \brief
1276 Overloaded operator +=
1277 \param right - Input - The right hand side.
1278 *
1279 */
1280 ESCRIPT_DLL_API
1281 Data& operator+=(const Data& right);
1282 ESCRIPT_DLL_API
1283 Data& operator+=(const boost::python::object& right);
1284
1285 ESCRIPT_DLL_API
1286 Data& operator=(const Data& other);
1287
1288 /**
1289 \brief
1290 Overloaded operator -=
1291 \param right - Input - The right hand side.
1292 *
1293 */
1294 ESCRIPT_DLL_API
1295 Data& operator-=(const Data& right);
1296 ESCRIPT_DLL_API
1297 Data& operator-=(const boost::python::object& right);
1298
1299 /**
1300 \brief
1301 Overloaded operator *=
1302 \param right - Input - The right hand side.
1303 *
1304 */
1305 ESCRIPT_DLL_API
1306 Data& operator*=(const Data& right);
1307 ESCRIPT_DLL_API
1308 Data& operator*=(const boost::python::object& right);
1309
1310 /**
1311 \brief
1312 Overloaded operator /=
1313 \param right - Input - The right hand side.
1314 *
1315 */
1316 ESCRIPT_DLL_API
1317 Data& operator/=(const Data& right);
1318 ESCRIPT_DLL_API
1319 Data& operator/=(const boost::python::object& right);
1320
1321 /**
1322 \brief
1323 Returns true if this can be interpolated to functionspace.
1324 */
1325 ESCRIPT_DLL_API
1326 bool
1327 probeInterpolation(const FunctionSpace& functionspace) const;
1328
1329 /**
1330 Data object slicing methods.
1331 */
1332
1333 /**
1334 \brief
1335 Returns a slice from this Data object.
1336
1337 Description:
1338 Implements the [] get operator in python.
1339 Calls getSlice.
1340
1341 \param key - Input - python slice tuple specifying
1342 slice to return.
1343 */
1344 ESCRIPT_DLL_API
1345 Data
1346 getItem(const boost::python::object& key) const;
1347
1348 /**
1349 \brief
1350 Copies slice from value into this Data object.
1351
1352 Implements the [] set operator in python.
1353 Calls setSlice.
1354
1355 \param key - Input - python slice tuple specifying
1356 slice to copy from value.
1357 \param value - Input - Data object to copy from.
1358 */
1359 ESCRIPT_DLL_API
1360 void
1361 setItemD(const boost::python::object& key,
1362 const Data& value);
1363
1364 ESCRIPT_DLL_API
1365 void
1366 setItemO(const boost::python::object& key,
1367 const boost::python::object& value);
1368
1369 // These following public methods should be treated as private.
1370
1371 /**
1372 \brief
1373 Perform the given unary operation on every element of every data point in
1374 this Data object.
1375 */
1376 template <class UnaryFunction>
1377 ESCRIPT_DLL_API
1378 inline
1379 void
1380 unaryOp2(UnaryFunction operation);
1381
1382 /**
1383 \brief
1384 Return a Data object containing the specified slice of
1385 this Data object.
1386 \param region - Input - Region to copy.
1387 *
1388 */
1389 ESCRIPT_DLL_API
1390 Data
1391 getSlice(const DataTypes::RegionType& region) const;
1392
1393 /**
1394 \brief
1395 Copy the specified slice from the given value into this
1396 Data object.
1397 \param value - Input - Data to copy from.
1398 \param region - Input - Region to copy.
1399 *
1400 */
1401 ESCRIPT_DLL_API
1402 void
1403 setSlice(const Data& value,
1404 const DataTypes::RegionType& region);
1405
1406 /**
1407 \brief
1408 print the data values to stdout. Used for debugging
1409 */
1410 ESCRIPT_DLL_API
1411 void
1412 print(void);
1413
1414 /**
1415 \brief
1416 return the MPI rank number of the local data
1417 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1418 is returned
1419 */
1420 ESCRIPT_DLL_API
1421 int
1422 get_MPIRank(void) const;
1423
1424 /**
1425 \brief
1426 return the MPI size (number of processes) of the local data
1427 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1428 is returned
1429 */
1430 ESCRIPT_DLL_API
1431 int
1432 get_MPISize(void) const;
1433
1434 /**
1435 \brief
1436 return the MPI communicator of the local data
1437 MPI_COMM_WORLD is assumed and returned.
1438 */
1439 ESCRIPT_DLL_API
1440 MPI_Comm
1441 get_MPIComm(void) const;
1442
1443 /**
1444 \brief
1445 return the object produced by the factory, which is a DataConstant or DataExpanded
1446 TODO Ownership of this object should be explained in doco.
1447 */
1448 ESCRIPT_DLL_API
1449 DataAbstract*
1450 borrowData(void) const;
1451
1452 ESCRIPT_DLL_API
1453 DataAbstract_ptr
1454 borrowDataPtr(void) const;
1455
1456 ESCRIPT_DLL_API
1457 DataReady_ptr
1458 borrowReadyPtr(void) const;
1459
1460
1461
1462 /**
1463 \brief
1464 Return a pointer to the beginning of the datapoint at the specified offset.
1465 TODO Eventually these should be inlined.
1466 \param i - position(offset) in the underlying datastructure
1467 */
1468
1469 ESCRIPT_DLL_API
1470 DataTypes::ValueType::const_reference
1471 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1472
1473
1474 ESCRIPT_DLL_API
1475 DataTypes::ValueType::reference
1476 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
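/*
  Example (a minimal sketch): point-wise read access through offsets. "d" is assumed to
  be an existing, non-lazy escript::Data object; names are illustrative only.

      for (int s=0; s<d.getNumSamples(); ++s)
      {
          for (int q=0; q<d.getNumDataPointsPerSample(); ++q)
          {
              escript::DataTypes::ValueType::size_type off=d.getDataOffset(s,q);
              double first=d.getDataAtOffsetRO(off);   // first component of this data point
          }
      }
*/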
1477
1478
1479
1480 /**
1481 \brief Create a buffer for use by getSample
1482 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1483 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1484
1485 In multi-threaded sections, this needs to be called on each thread.
1486
1487 \return A BufferGroup* if Data is lazy, NULL otherwise.
1488 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1489 */
1490 ESCRIPT_DLL_API
1491 BufferGroup*
1492 allocSampleBuffer() const;
1493
1494 /**
1495 \brief Free a buffer allocated with allocSampleBuffer.
1496 \param buffer Input - pointer to the buffer to deallocate.
1497 */
1498 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
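/*
  Example (a minimal sketch): read-only sample access for (possibly lazy) Data using
  the buffer machinery above. "d" is assumed to be an existing escript::Data object;
  names are illustrative only.

      #pragma omp parallel
      {
          escript::BufferGroup* bg=d.allocSampleBuffer();    // per-thread buffer, NULL unless d is lazy
          #pragma omp for
          for (int s=0; s<d.getNumSamples(); ++s)
          {
              const double* p=d.getSampleDataRO(s, bg);      // read-only view of one sample
          }
          d.freeSampleBuffer(bg);                            // release with the matching call
      }
*/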
1499
1500 protected:
1501
1502 private:
1503
1504 double
1505 LsupWorker() const;
1506
1507 double
1508 supWorker() const;
1509
1510 double
1511 infWorker() const;
1512
1513 boost::python::object
1514 integrateWorker() const;
1515
1516 void
1517 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1518
1519 void
1520 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1521
1522
1523 /**
1524 \brief
1525 Check *this and the right operand are compatible. Throws
1526 an exception if they aren't.
1527 \param right - Input - The right hand side.
1528 */
1529 inline
1530 void
1531 operandCheck(const Data& right) const
1532 {
1533 return m_data->operandCheck(*(right.m_data.get()));
1534 }
1535
1536 /**
1537 \brief
1538 Perform the specified reduction algorithm on every element of every data point in
1539 this Data object according to the given function and return the single value result.
1540 */
1541 template <class BinaryFunction>
1542 inline
1543 double
1544 algorithm(BinaryFunction operation,
1545 double initial_value) const;
1546
1547 /**
1548 \brief
1549 Reduce each data-point in this Data object using the given operation. Return a Data
1550 object with the same number of data-points, but with each data-point containing only
1551 one value - the result of the reduction operation on the corresponding data-point in
1552 this Data object
1553 */
1554 template <class BinaryFunction>
1555 inline
1556 Data
1557 dp_algorithm(BinaryFunction operation,
1558 double initial_value) const;
1559
1560 /**
1561 \brief
1562 Perform the given binary operation on all of the data's elements.
1563 The underlying type of the right hand side (right) determines the final
1564 type of *this after the operation. For example if the right hand side
1565 is expanded *this will be expanded if necessary.
1566 RHS is a Data object.
1567 */
1568 template <class BinaryFunction>
1569 inline
1570 void
1571 binaryOp(const Data& right,
1572 BinaryFunction operation);
1573
1574 /**
1575 \brief
1576 Convert the data type of the RHS to match this.
1577 \param right - Input - data type to match.
1578 */
1579 void
1580 typeMatchLeft(Data& right) const;
1581
1582 /**
1583 \brief
1584 Convert the data type of this to match the RHS.
1585 \param right - Input - data type to match.
1586 */
1587 void
1588 typeMatchRight(const Data& right);
1589
1590 /**
1591 \brief
1592 Construct a Data object of the appropriate type.
1593 */
1594
1595 void
1596 initialise(const DataTypes::ValueType& value,
1597 const DataTypes::ShapeType& shape,
1598 const FunctionSpace& what,
1599 bool expanded);
1600
1601 void
1602 initialise(const WrappedArray& value,
1603 const FunctionSpace& what,
1604 bool expanded);
1605
1606 //
1607 // flag to protect the data object against any update
1608 bool m_protected;
1609 mutable bool m_shared;
1610 bool m_lazy;
1611
1612 //
1613 // pointer to the actual data object
1614 // boost::shared_ptr<DataAbstract> m_data;
1615 DataAbstract_ptr m_data;
1616
1617 // If possible please use getReadyPtr instead.
1618 // But see warning below.
1619 const DataReady*
1620 getReady() const;
1621
1622 DataReady*
1623 getReady();
1624
1625
1626 // Be wary of using this for local operations since it (temporarily) increases reference count.
1627 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1628 // getReady() instead
1629 DataReady_ptr
1630 getReadyPtr();
1631
1632 const_DataReady_ptr
1633 getReadyPtr() const;
1634
1635
1636 /**
1637 \brief Update the Data's shared flag
1638 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1639 For internal use only.
1640 */
1641 void updateShareStatus(bool nowshared) const
1642 {
1643 m_shared=nowshared; // m_shared is mutable
1644 }
1645
1646 // In the isShared() method below:
1647 // A problem would occur if m_data (the address pointed to) were being modified
1648 // while the call m_data->is_shared is being executed.
1649 //
1650 // Q: So why do I think this code can be thread safe/correct?
1651 // A: We need to make some assumptions.
1652 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1653 // 2. We assume that no constructions or assignments which will share previously unshared data
1654 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1655 //
1656 // This means that the only transition we need to consider, is when a previously shared object is
1657 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1658 // In those cases the m_shared flag changes to false after m_data has completed changing.
1659 // For any threads executing before the flag switches they will assume the object is still shared.
1660 bool isShared() const
1661 {
1662 return m_shared;
1663 /* if (m_shared) return true;
1664 if (m_data->isShared())
1665 {
1666 updateShareStatus(true);
1667 return true;
1668 }
1669 return false;*/
1670 }
1671
1672 void forceResolve()
1673 {
1674 if (isLazy())
1675 {
1676 #ifdef _OPENMP
1677 if (omp_in_parallel())
1678 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1679 throw DataException("Please do not call forceResolve() in a parallel region.");
1680 }
1681 #endif
1682 resolve();
1683 }
1684 }
1685
1686 /**
1687 \brief if another object is sharing out member data make a copy to work with instead.
1688 This code should only be called from single threaded sections of code.
1689 */
1690 void exclusiveWrite()
1691 {
1692 #ifdef _OPENMP
1693 if (omp_in_parallel())
1694 {
1695 // *((int*)0)=17;
1696 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1697 }
1698 #endif
1699 forceResolve();
1700 if (isShared())
1701 {
1702 DataAbstract* t=m_data->deepCopy();
1703 set_m_data(DataAbstract_ptr(t));
1704 }
1705 }
1706
1707 /**
1708 \brief checks if caller can have exclusive write to the object
1709 */
1710 void checkExclusiveWrite()
1711 {
1712 if (isLazy() || isShared())
1713 {
1714 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1715 }
1716 }
1717
1718 /**
1719 \brief Modify the data abstract hosted by this Data object
1720 For internal use only.
1721 Passing a pointer to null is permitted (do this in the destructor)
1722 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1723 */
1724 void set_m_data(DataAbstract_ptr p);
1725
1726 friend class DataAbstract; // To allow calls to updateShareStatus
1727
1728 };
1729
1730 } // end namespace escript
1731
1732
1733 // No, this is not supposed to be at the top of the file
1734 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1735 // so that I can dynamic cast between them below.
1736 #include "DataReady.h"
1737 #include "DataLazy.h"
1738
1739 namespace escript
1740 {
1741
1742 inline
1743 const DataReady*
1744 Data::getReady() const
1745 {
1746 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1747 EsysAssert((dr!=0), "Error - casting to DataReady.");
1748 return dr;
1749 }
1750
1751 inline
1752 DataReady*
1753 Data::getReady()
1754 {
1755 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1756 EsysAssert((dr!=0), "Error - casting to DataReady.");
1757 return dr;
1758 }
1759
1760 // Be wary of using this for local operations since it (temporarily) increases reference count.
1761 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1762 // getReady() instead
1763 inline
1764 DataReady_ptr
1765 Data::getReadyPtr()
1766 {
1767 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1768 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1769 return dr;
1770 }
1771
1772
1773 inline
1774 const_DataReady_ptr
1775 Data::getReadyPtr() const
1776 {
1777 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1778 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1779 return dr;
1780 }
1781
1782 inline
1783 DataAbstract::ValueType::value_type*
1784 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1785 {
1786 // if (isLazy())
1787 // {
1788 // resolve();
1789 // }
1790 // exclusiveWrite();
1791 if (isLazy())
1792 {
1793 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1794 }
1795 return getReady()->getSampleDataRW(sampleNo);
1796 }
1797
1798 // inline
1799 // const DataAbstract::ValueType::value_type*
1800 // Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer)
1801 // {
1802 // DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1803 // if (l!=0)
1804 // {
1805 // size_t offset=0;
1806 // if (buffer==NULL)
1807 // {
1808 // throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1809 // }
1810 // const DataTypes::ValueType* res=l->resolveSample(*buffer,0,sampleNo,offset);
1811 // return &((*res)[offset]);
1812 // }
1813 // return getReady()->getSampleDataRO(sampleNo);
1814 // }
1815
1816 inline
1817 const DataAbstract::ValueType::value_type*
1818 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1819 {
1820 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1821 if (l!=0)
1822 {
1823 size_t offset=0;
1824 if (bufferg==NULL)
1825 {
1826 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1827 }
1828 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1829 return &((*res)[offset]);
1830 }
1831 return getReady()->getSampleDataRO(sampleNo);
1832 }
1833
1834
1835
1836 /**
1837 Modify a filename for MPI parallel output to multiple files
1838 */
1839 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1840
1841 /**
1842 Binary Data object operators.
1843 */
1844 inline double rpow(double x,double y)
1845 {
1846 return pow(y,x);
1847 }
1848
1849 /**
1850 \brief
1851 Operator+
1852 Takes two Data objects.
1853 */
1854 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1855
1856 /**
1857 \brief
1858 Operator-
1859 Takes two Data objects.
1860 */
1861 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1862
1863 /**
1864 \brief
1865 Operator*
1866 Takes two Data objects.
1867 */
1868 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1869
1870 /**
1871 \brief
1872 Operator/
1873 Takes two Data objects.
1874 */
1875 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
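/*
  Example (a minimal sketch): the overloaded arithmetic operators declared above. "a"
  and "b" are assumed to be escript::Data objects on compatible function spaces; names
  are illustrative only.

      escript::Data c = a*b + a/b - b;   // element-wise arithmetic on the data points
*/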
1876
1877 /**
1878 \brief
1879 Operator+
1880 Takes LHS Data object and RHS python::object.
1881 python::object must be convertible to Data type.
1882 */
1883 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1884
1885 /**
1886 \brief
1887 Operator-
1888 Takes LHS Data object and RHS python::object.
1889 python::object must be convertible to Data type.
1890 */
1891 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1892
1893 /**
1894 \brief
1895 Operator*
1896 Takes LHS Data object and RHS python::object.
1897 python::object must be convertible to Data type.
1898 */
1899 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1900
1901 /**
1902 \brief
1903 Operator/
1904 Takes LHS Data object and RHS python::object.
1905 python::object must be convertible to Data type.
1906 */
1907 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1908
1909 /**
1910 \brief
1911 Operator+
1912 Takes LHS python::object and RHS Data object.
1913 python::object must be convertible to Data type.
1914 */
1915 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1916
1917 /**
1918 \brief
1919 Operator-
1920 Takes LHS python::object and RHS Data object.
1921 python::object must be convertible to Data type.
1922 */
1923 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1924
1925 /**
1926 \brief
1927 Operator*
1928 Takes LHS python::object and RHS Data object.
1929 python::object must be convertible to Data type.
1930 */
1931 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1932
1933 /**
1934 \brief
1935 Operator/
1936 Takes LHS python::object and RHS Data object.
1937 python::object must be convertible to Data type.
1938 */
1939 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1940
1941
1942
1943 /**
1944 \brief
1945 Output operator
1946 */
1947 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1948
1949 /**
1950 \brief
1951 Compute a tensor product of two Data objects
1952 \param arg0 - Input - Data object
1953 \param arg1 - Input - Data object
1954 \param axis_offset - Input - axis offset
1955 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1956 */
1957 ESCRIPT_DLL_API
1958 Data
1959 C_GeneralTensorProduct(Data& arg0,
1960 Data& arg1,
1961 int axis_offset=0,
1962 int transpose=0);
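/*
  Example (a minimal sketch): a matrix-vector style contraction with
  C_GeneralTensorProduct. "A" (rank 2) and "x" (rank 1) are assumed to be escript::Data
  objects on compatible function spaces; axis_offset=1 is assumed to sum over one shared axis.

      escript::Data y = escript::C_GeneralTensorProduct(A, x, 1, 0);
*/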
1963
1964 /**
1965 \brief
1966 Perform the given binary operation with this and right as operands.
1967 Right is a Data object.
1968 */
1969 template <class BinaryFunction>
1970 inline
1971 void
1972 Data::binaryOp(const Data& right,
1973 BinaryFunction operation)
1974 {
1975 //
1976 // an object of rank zero cannot be updated from an RHS of higher rank
1977 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1978 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1979 }
1980
1981 if (isLazy() || right.isLazy())
1982 {
1983 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1984 }
1985 //
1986 // initially make the temporary a shallow copy
1987 Data tempRight(right);
1988
1989 if (getFunctionSpace()!=right.getFunctionSpace()) {
1990 if (right.probeInterpolation(getFunctionSpace())) {
1991 //
1992 // an interpolation is required so create a new Data
1993 tempRight=Data(right,this->getFunctionSpace());
1994 } else if (probeInterpolation(right.getFunctionSpace())) {
1995 //
1996 // interpolate onto the RHS function space
1997 Data tempLeft(*this,right.getFunctionSpace());
1998 // m_data=tempLeft.m_data;
1999 set_m_data(tempLeft.m_data);
2000 }
2001 }
2002 operandCheck(tempRight);
2003 //
2004 // ensure this has the right type for the RHS
2005 typeMatchRight(tempRight);
2006 //
2007 // Need to cast to the concrete types so that the correct binaryOp
2008 // is called.
2009 if (isExpanded()) {
2010 //
2011 // Expanded data will be done in parallel, the right hand side can be
2012 // of any data type
2013 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2014 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2015 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2016 } else if (isTagged()) {
2017 //
2018 // Tagged data is operated on serially, the right hand side can be
2019 // either DataConstant or DataTagged
2020 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2021 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2022 if (right.isTagged()) {
2023 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2024 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2025 escript::binaryOp(*leftC,*rightC,operation);
2026 } else {
2027 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2028 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2029 escript::binaryOp(*leftC,*rightC,operation);
2030 }
2031 } else if (isConstant()) {
2032 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2033 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2034 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2035 escript::binaryOp(*leftC,*rightC,operation);
2036 }
2037 }
2038
2039 /**
2040 \brief
2041 Perform the given Data object reduction algorithm on this and return the result.
2042 Given operation combines each element of each data point, thus argument
2043 object (*this) is a rank n Data object, and returned object is a scalar.
2044 Calls escript::algorithm.
2045 */
2046 template <class BinaryFunction>
2047 inline
2048 double
2049 Data::algorithm(BinaryFunction operation, double initial_value) const
2050 {
2051 if (isExpanded()) {
2052 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2053 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2054 return escript::algorithm(*leftC,operation,initial_value);
2055 } else if (isTagged()) {
2056 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2057 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2058 return escript::algorithm(*leftC,operation,initial_value);
2059 } else if (isConstant()) {
2060 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2061 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2062 return escript::algorithm(*leftC,operation,initial_value);
2063 } else if (isEmpty()) {
2064 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2065 } else if (isLazy()) {
2066 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2067 } else {
2068 throw DataException("Error - Data encapsulates an unknown type.");
2069 }
2070 }
2071
2072 /**
2073 \brief
2074 Perform the given data point reduction algorithm on data and return the result.
2075 Given operation combines each element within each data point into a scalar,
2076 thus argument object is a rank n Data object, and returned object is a
2077 rank 0 Data object.
2078 Calls escript::dp_algorithm.
2079 */
2080 template <class BinaryFunction>
2081 inline
2082 Data
2083 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2084 {
2085 if (isEmpty()) {
2086 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2087 }
2088 else if (isExpanded()) {
2089 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2090 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2091 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2092 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2093 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2094 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2095 return result;
2096 }
2097 else if (isTagged()) {
2098 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2099 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2100 DataTypes::ValueType defval(1);
2101 defval[0]=0;
2102 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2103 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2104 return Data(resultT); // note: the Data object now owns the resultT pointer
2105 }
2106 else if (isConstant()) {
2107 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2108 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2109 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2110 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2111 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2112 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2113 return result;
2114 } else if (isLazy()) {
2115 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2116 } else {
2117 throw DataException("Error - Data encapsulates an unknown type.");
2118 }
2119 }
2120
2121 /**
2122 \brief
2123 Compute a tensor operation with two Data objects
2124 \param arg0 - Input - Data object
2125 \param arg1 - Input - Data object
2126 \param operation - Input - Binary op functor
2127 */
2128 template <typename BinaryFunction>
2129 inline
2130 Data
2131 C_TensorBinaryOperation(Data const &arg_0,
2132 Data const &arg_1,
2133 BinaryFunction operation)
2134 {
2135 if (arg_0.isEmpty() || arg_1.isEmpty())
2136 {
2137 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2138 }
2139 if (arg_0.isLazy() || arg_1.isLazy())
2140 {
2141 throw DataException("Error - Operations not permitted on lazy data.");
2142 }
2143 // Interpolate if necessary and find an appropriate function space
2144 Data arg_0_Z, arg_1_Z;
2145 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2146 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2147 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2148 arg_1_Z = Data(arg_1);
2149 }
2150 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2151 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2152 arg_0_Z =Data(arg_0);
2153 }
2154 else {
2155 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2156 }
2157 } else {
2158 arg_0_Z = Data(arg_0);
2159 arg_1_Z = Data(arg_1);
2160 }
2161 // Get rank and shape of inputs
2162 int rank0 = arg_0_Z.getDataPointRank();
2163 int rank1 = arg_1_Z.getDataPointRank();
2164 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2165 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2166 int size0 = arg_0_Z.getDataPointSize();
2167 int size1 = arg_1_Z.getDataPointSize();
2168 // Declare output Data object
2169 Data res;
2170
2171 if (shape0 == shape1) {
2172 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2173 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2174 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2175 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2176 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2177
2178 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2179 }
2180 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2181
2182 // Prepare the DataConstant input
2183 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2184
2185 // Borrow DataTagged input from Data object
2186 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2187
2188 // Prepare a DataTagged output (tmp_2)
2189 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2190 res.tag();
2191 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2192
2193 // Prepare offset into DataConstant
2194 int offset_0 = tmp_0->getPointOffset(0,0);
2195 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2196
2197 // Get the pointers to the actual data
2198 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2199 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2200
2201 // Compute a result for the default
2202 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2203 // Compute a result for each tag
2204 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2205 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2206 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2207 tmp_2->addTag(i->first);
2208 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2209 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2210
2211 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2212 }
2213
2214 }
2215 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2216 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2217 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2218 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2219 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2220
2221 int sampleNo_1,dataPointNo_1;
2222 int numSamples_1 = arg_1_Z.getNumSamples();
2223 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2224 int offset_0 = tmp_0->getPointOffset(0,0);
2225 res.requireWrite();
2226 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2227 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2228 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2229 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2230 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2231 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2232 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2233 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2234 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2235 }
2236 }
2237
2238 }
2239 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2240 // Borrow DataTagged input from Data object
2241 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2242
2243 // Prepare the DataConstant input
2244 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2245
2246 // Prepare a DataTagged output (tmp_2)
2247 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2248 res.tag();
2249 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2250
2251 // Prepare offset into DataConstant
2252 int offset_1 = tmp_1->getPointOffset(0,0);
2253
2254 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2255 // Get the pointers to the actual data
2256 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2257 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2258 // Compute a result for the default
2259 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2260 // Compute a result for each tag
2261 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2262 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2263 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2264 tmp_2->addTag(i->first);
2265 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2266 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2267 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2268 }
2269
2270 }
2271 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2272 // Borrow DataTagged input from Data object
2273 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2274
2275 // Borrow DataTagged input from Data object
2276 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2277
2278 // Prepare a DataTagged output (tmp_2)
2279 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2280 res.tag(); // DataTagged output
2281 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2282
2283 // Get the pointers to the actual data
2284 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2285 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2286 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2287
2288 // Compute a result for the default
2289 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2290 // Merge the tags
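// (so the result has an entry for every tag appearing in either input; a tag missing
//  from one input falls back to that input's default value in the lookups below)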
2291 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2292 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2293 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2294 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2295 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2296 }
2297 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2298 tmp_2->addTag(i->first);
2299 }
2300 // Compute a result for each tag
2301 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2302 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2303
2304 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2305 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2306 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2307
2308 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2309 }
2310
2311 }
2312 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2313 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2314 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2315 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2316 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2317 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2318
2319 int sampleNo_0,dataPointNo_0;
2320 int numSamples_0 = arg_0_Z.getNumSamples();
2321 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2322 res.requireWrite();
2323 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2324 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2325 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2326 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2327 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2328 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2329 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2330 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2331 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2332 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2333 }
2334 }
2335
2336 }
2337 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2338 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2339 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2340 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2341 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2342
2343 int sampleNo_0,dataPointNo_0;
2344 int numSamples_0 = arg_0_Z.getNumSamples();
2345 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2346 int offset_1 = tmp_1->getPointOffset(0,0);
2347 res.requireWrite();
2348 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2349 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2350 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2351 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2352 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2353
2354 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2355 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2356 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2357
2358
2359 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2360 }
2361 }
2362
2363 }
2364 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2365 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2366 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2367 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2368 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2369 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2370
2371 int sampleNo_0,dataPointNo_0;
2372 int numSamples_0 = arg_0_Z.getNumSamples();
2373 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2374 res.requireWrite();
2375 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2376 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2377 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2378 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2379 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2380 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2381 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2382 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2383 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2384 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2385 }
2386 }
2387
2388 }
2389 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2390 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2391 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2392 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2393 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2394 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2395
2396 int sampleNo_0,dataPointNo_0;
2397 int numSamples_0 = arg_0_Z.getNumSamples();
2398 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2399 res.requireWrite();
2400 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2401 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2402 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2403 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2404 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2405 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2406 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2407 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2408 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2409 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2410 }
2411 }
2412
2413 }
2414 else {
2415 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2416 }
2417
2418 } else if (0 == rank0) {
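// arg_0 has rank 0: broadcast its single value (ptr_0[0]) against every component
// of arg_1's data points.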
2419 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2420 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2421 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2422 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2423 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2424 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2425 }
2426 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2427
2428 // Prepare the DataConstant input
2429 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2430
2431 // Borrow DataTagged input from Data object
2432 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2433
2434 // Prepare a DataTagged output (tmp_2)
2435 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2436 res.tag();
2437 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2438
2439 // Prepare offset into DataConstant
2440 int offset_0 = tmp_0->getPointOffset(0,0);
2441 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2442
2443 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2444 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2445
2446 // Compute a result for the default
2447 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2448 // Compute a result for each tag
2449 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2450 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2451 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2452 tmp_2->addTag(i->first);
2453 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2454 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2455 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2456 }
2457
2458 }
2459 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2460
2461 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2462 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2463 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2464 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2465
2466 int sampleNo_1,dataPointNo_1;
2467 int numSamples_1 = arg_1_Z.getNumSamples();
2468 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2469 int offset_0 = tmp_0->getPointOffset(0,0);
2470 res.requireWrite();
2471 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2472 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2473 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2474 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2475 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2476 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2477 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2478 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2479 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2480
2481 }
2482 }
2483
2484 }
2485 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2486
2487 // Borrow DataTagged input from Data object
2488 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2489
2490 // Prepare the DataConstant input
2491 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2492
2493 // Prepare a DataTagged output (tmp_2)
2494 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2495 res.tag();
2496 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2497
2498 // Prepare offset into DataConstant
2499 int offset_1 = tmp_1->getPointOffset(0,0);
2500 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2501
2502 // Get the pointers to the actual data
2503 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2504 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2505
2506
2507 // Compute a result for the default
2508 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2509 // Compute a result for each tag
2510 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2511 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2512 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2513 tmp_2->addTag(i->first);
2514 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2515 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2516
2517 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2518 }
2519
2520 }
2521 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2522
2523 // Borrow DataTagged input from Data object
2524 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2525
2526 // Borrow DataTagged input from Data object
2527 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2528
2529 // Prepare a DataTagged output (tmp_2)
2530 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2531 res.tag(); // DataTagged output
2532 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2533
2534 // Get the pointers to the actual data
2535 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2536 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2537 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2538
2539 // Compute a result for the default
2540 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2541 // Merge the tags
2542 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2543 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2544 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2545 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2546 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2547 }
2548 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2549 tmp_2->addTag(i->first);
2550 }
2551 // Compute a result for each tag
2552 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2553 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2554 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2555 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2556 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2557
2558 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2559 }
2560
2561 }
2562 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2563
2564 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2565 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2566 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2567 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2568 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2569
2570 int sampleNo_0,dataPointNo_0;
2571 int numSamples_0 = arg_0_Z.getNumSamples();
2572 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2573 res.requireWrite();
2574 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2575 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2576 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2577 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2578 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2579 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2580 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2581 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2582 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2583 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2584 }
2585 }
2586
2587 }
2588 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2589 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2590 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2591 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2592 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2593
2594 int sampleNo_0,dataPointNo_0;
2595 int numSamples_0 = arg_0_Z.getNumSamples();
2596 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2597 int offset_1 = tmp_1->getPointOffset(0,0);
2598 res.requireWrite();
2599 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2600 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2601 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2602 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2603 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2604 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2605 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2606 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2607 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2608 }
2609 }
2610
2611
2612 }
2613 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2614
2615 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2616 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2617 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2618 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2619 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2620
2621 int sampleNo_0,dataPointNo_0;
2622 int numSamples_0 = arg_0_Z.getNumSamples();
2623 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2624 res.requireWrite();
2625 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2626 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2627 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2628 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2629 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2630 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2631 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2632 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2633 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2634 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2635 }
2636 }
2637
2638 }
2639 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2640
2641 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2642 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2643 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2644 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2645 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2646
2647 int sampleNo_0,dataPointNo_0;
2648 int numSamples_0 = arg_0_Z.getNumSamples();
2649 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2650 res.requireWrite();
2651 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2652 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2653 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2654 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2655 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2656 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2657 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2658 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2659 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2660 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2661 }
2662 }
2663
2664 }
2665 else {
2666 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2667 }
2668
2669 } else if (0 == rank1) {
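// arg_1 has rank 0: broadcast its single value (ptr_1[0]) against every component
// of arg_0's data points.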
2670 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2671 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2672 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2673 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2674 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2675 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2676 }
2677 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2678
2679 // Prepare the DataConstant input
2680 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2681
2682 // Borrow DataTagged input from Data object
2683 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2684
2685 // Prepare a DataTagged output (tmp_2)
2686 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2687 res.tag();
2688 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2689
2690 // Prepare offset into DataConstant
2691 int offset_0 = tmp_0->getPointOffset(0,0);
2692 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2693
2694 //Get the pointers to the actual data
2695 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2696 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2697
2698 // Compute a result for the default
2699 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2700 // Compute a result for each tag
2701 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2702 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2703 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2704 tmp_2->addTag(i->first);
2705 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2706 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2707 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2708 }
2709 }
2710 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2711
2712 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2713 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2714 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2715 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2716
2717 int sampleNo_1,dataPointNo_1;
2718 int numSamples_1 = arg_1_Z.getNumSamples();
2719 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2720 int offset_0 = tmp_0->getPointOffset(0,0);
2721 res.requireWrite();
2722 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2723 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2724 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2725 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2726 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2727 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2728 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2729 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2730 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2731 }
2732 }
2733
2734 }
2735 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2736
2737 // Borrow DataTagged input from Data object
2738 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2739
2740 // Prepare the DataConstant input
2741 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2742
2743 // Prepare a DataTagged output (tmp_2)
2744 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2745 res.tag();
2746 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2747
2748 // Prepare offset into DataConstant
2749 int offset_1 = tmp_1->getPointOffset(0,0);
2750 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2751 // Get the pointers to the actual data
2752 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2753 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2754 // Compute a result for the default
2755 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2756 // Compute a result for each tag
2757 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2758 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2759 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2760 tmp_2->addTag(i->first);
2761 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2762 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2763 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2764 }
2765
2766 }
2767 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2768
2769 // Borrow DataTagged input from Data object
2770 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2771
2772 // Borrow DataTagged input from Data object
2773 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2774
2775 // Prepare a DataTagged output (tmp_2)
2776 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2777 res.tag(); // DataTagged output
2778 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2779
2780 // Get the pointers to the actual data
2781 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2782 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2783 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2784
2785 // Compute a result for the default
2786 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2787 // Merge the tags
2788 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2789 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2790 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2791 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2792 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2793 }
2794 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2795 tmp_2->addTag(i->first);
2796 }
2797 // Compute a result for each tag
2798 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2799 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2800 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2801 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2802 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2803 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2804 }
2805
2806 }
2807 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2808
2809 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2810 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2811 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2812 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2813 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2814
2815 int sampleNo_0,dataPointNo_0;
2816 int numSamples_0 = arg_0_Z.getNumSamples();
2817 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2818 res.requireWrite();
2819 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2820 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2821 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2822 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2823 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2824 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2825 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2826 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2827 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2828 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2829 }
2830 }
2831
2832 }
2833 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2834 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2835 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2836 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2837 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2838
2839 int sampleNo_0,dataPointNo_0;
2840 int numSamples_0 = arg_0_Z.getNumSamples();
2841 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2842 int offset_1 = tmp_1->getPointOffset(0,0);
2843 res.requireWrite();
2844 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2845 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2846 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2847 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2848 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2849 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2850 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2851 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2852 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2853 }
2854 }
2855
2856
2857 }
2858 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2859
2860 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2861 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2862 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2863 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2864 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2865
2866 int sampleNo_0,dataPointNo_0;
2867 int numSamples_0 = arg_0_Z.getNumSamples();
2868 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2869 res.requireWrite();
2870 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2871 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2872 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2873 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2874 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2875 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2876 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2877 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2878 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2879 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2880 }
2881 }
2882
2883 }
2884 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2885
2886 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2887 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2888 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2889 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2890 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2891
2892 int sampleNo_0,dataPointNo_0;
2893 int numSamples_0 = arg_0_Z.getNumSamples();
2894 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2895 res.requireWrite();
2896 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2897 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2898 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2899 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2900 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2901 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2902 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2903 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2904 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2905 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2906 }
2907 }
2908
2909 }
2910 else {
2911 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2912 }
2913
2914 } else {
2915 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2916 }
2917
2918 return res;
2919 }
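/*
   A minimal usage sketch for C_TensorBinaryOperation, showing how the template above
   might be invoked with standard functors. The FunctionSpace `fs` is assumed to come
   from some domain and is not defined in this header.

     #include <functional>   // std::plus, std::multiplies

     escript::Data u(1.0, escript::DataTypes::scalarShape, fs, true);   // expanded data
     escript::Data v(2.0, escript::DataTypes::scalarShape, fs, false);  // constant data

     // point-wise u + v and u * v, evaluated on the common function space
     escript::Data sum  = escript::C_TensorBinaryOperation(u, v, std::plus<double>());
     escript::Data prod = escript::C_TensorBinaryOperation(u, v, std::multiplies<double>());
*/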
2920
2921 template <typename UnaryFunction>
2922 Data
2923 C_TensorUnaryOperation(Data const &arg_0,
2924 UnaryFunction operation)
2925 {
2926 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2927 {
2928 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2929 }
2930 if (arg_0.isLazy())
2931 {
2932 throw DataException("Error - Operations not permitted on lazy data.");
2933 }
2934 // A single argument needs no interpolation; work on a (shallow) copy of the input
2935 Data arg_0_Z = Data(arg_0);
2936
2937 // Get rank and shape of inputs
2938 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2939 int size0 = arg_0_Z.getDataPointSize();
2940
2941 // Declare output Data object
2942 Data res;
2943
2944 if (arg_0_Z.isConstant()) {
2945 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2946 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2947 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2948 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2949 }
2950 else if (arg_0_Z.isTagged()) {
2951
2952 // Borrow DataTagged input from Data object
2953 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2954
2955 // Prepare a DataTagged output (tmp_2)
2956 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2957 res.tag();
2958 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2959
2960 // Get the pointers to the actual data
2961 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2962 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2963 // Compute a result for the default
2964 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2965 // Compute a result for each tag
2966 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2967 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2968 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2969 tmp_2->addTag(i->first);
2970 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2971 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2972 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2973 }
2974
2975 }
2976 else if (arg_0_Z.isExpanded()) {
2977
2978 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2979 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2980 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2981
2982 int sampleNo_0,dataPointNo_0;
2983 int numSamples_0 = arg_0_Z.getNumSamples();
2984 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2985 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2986 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2987 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2988 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2989 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2990 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2991 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2992 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2993 }
2994 }
2995 }
2996 else {
2997 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
2998 }
2999
3000 return res;
3001 }
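/*
   A minimal usage sketch for C_TensorUnaryOperation; `u` is assumed to be an existing
   escript::Data object. The cast to Data::UnaryDFunPtr resolves the overloaded ::fabs
   to its double(double) form, as that typedef is intended to allow.

     #include <cmath>        // ::fabs
     #include <functional>   // std::negate

     escript::Data minusU = escript::C_TensorUnaryOperation(u, std::negate<double>());
     escript::Data absU   = escript::C_TensorUnaryOperation(u, (escript::Data::UnaryDFunPtr)::fabs);
*/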
3002
3003 }
3004 #endif
