/[escript]/trunk/escript/src/Data.h

Contents of /trunk/escript/src/Data.h



Revision 2721
Fri Oct 16 05:40:12 2009 UTC by jfenwick
File MIME type: text/plain
File size: 92590 byte(s)
minval and maxval are now lazy operations (they weren't before).
Whether or not Lsup, sup and inf resolve their arguments before computing answers is controlled by the escriptParam 'RESOLVE_COLLECTIVE'.
Note: integrate() still forces a resolve.

Added some unit tests for operations which weren't tested before.
Added deepcopy implementations for lazy operations which got missed somehow.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2009 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
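// A minimal usage sketch (illustrative only, not part of the original header).
// It assumes an existing FunctionSpace "fs" obtained elsewhere; only
// constructors, operators and methods declared in this header are used.
//
//   escript::Data a(1.0, escript::DataTypes::ShapeType(), fs, false);
//   escript::Data b(2.0, escript::DataTypes::ShapeType(), fs, false);
//   escript::Data c = a + b;        // binary operators are declared near the end of this file
//   double m = c.Lsup();            // maximum absolute value over all data points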
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData to "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
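// A sketch of the lazy-evaluation workflow using only delay(), delaySelf()
// and resolve() as declared in this header; "d" is assumed to be an existing,
// ready Data object.
//
//   escript::Data lazy = d.delay();   // wrap d for deferred evaluation
//   // ... compose further operations; nothing is evaluated yet ...
//   lazy.resolve();                   // force evaluation into ready data
//
// Alternatively, d.delaySelf() converts d in place.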
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true, if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
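// A sketch of the write-access pattern described above (assumptions: "d" is a
// non-lazy-safe target and OpenMP is available). requireWrite() is called once
// in a single-threaded section; the per-sample pointers are then used from
// worker threads.
//
//   d.requireWrite();                              // single-threaded: resolve + un-share
//   #pragma omp parallel for
//   for (int s = 0; s < d.getNumSamples(); ++s) {
//       double* p = d.getSampleDataRW(s);          // raw access to one sample
//       // ... modify the sample values through p ...
//   }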
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for LazyData.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - A buffer to compute (and store) sample data in will be selected from this group.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687 /**
688 \brief Return true if this object contains no samples.
689 This is not the same as isEmpty()
690 */
691 ESCRIPT_DLL_API
692 bool
693 hasNoSamples() const
694 {
695 return getLength()==0;
696 }
697
698 /**
699 \brief
700 Assign the given value to the tag associated with name. Implicitly converts this
701 object to type DataTagged. Throws an exception if this object
702 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
703 \param name - Input - name of tag.
704 \param value - Input - Value to associate with given key.
705 */
706 ESCRIPT_DLL_API
707 void
708 setTaggedValueByName(std::string name,
709 const boost::python::object& value);
710
711 /**
712 \brief
713 Assign the given value to the tag. Implicitly converts this
714 object to type DataTagged if it is constant.
715
716 \param tagKey - Input - Integer key.
717 \param value - Input - Value to associate with given key.
718 *
719 */
720 ESCRIPT_DLL_API
721 void
722 setTaggedValue(int tagKey,
723 const boost::python::object& value);
724
725 /**
726 \brief
727 Assign the given value to the tag. Implicitly converts this
728 object to type DataTagged if it is constant.
729
730 \param tagKey - Input - Integer key.
731 \param pointshape - Input - The shape of the value parameter
732 \param value - Input - Value to associate with given key.
733 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
734 */
735 ESCRIPT_DLL_API
736 void
737 setTaggedValueFromCPP(int tagKey,
738 const DataTypes::ShapeType& pointshape,
739 const DataTypes::ValueType& value,
740 int dataOffset=0);
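// A sketch of the tagging workflow from C++ using tag() and
// setTaggedValueFromCPP(), which avoids building boost::python objects. The
// tag key 1 and the value 42.0 are illustrative only.
//
//   d.tag();                                        // convert constant data to tagged
//   escript::DataTypes::ValueType v(1);
//   v[0] = 42.0;
//   d.setTaggedValueFromCPP(1, escript::DataTypes::scalarShape, v);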
741
742
743
744 /**
745 \brief
746 Copy other Data object into this Data object where mask is positive.
747 */
748 ESCRIPT_DLL_API
749 void
750 copyWithMask(const Data& other,
751 const Data& mask);
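// A sketch of a masked update: values from "other" replace values in "d"
// wherever the mask is positive. wherePositive() is declared further down in
// this header; "threshold" and "other" are illustrative Data objects.
//
//   escript::Data mask = threshold.wherePositive();  // 1 where threshold > 0, else 0
//   d.copyWithMask(other, mask);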
752
753 /**
754 Data object operation methods and operators.
755 */
756
757 /**
758 \brief
759 set all values to zero
760 *
761 */
762 ESCRIPT_DLL_API
763 void
764 setToZero();
765
766 /**
767 \brief
768 Interpolates this onto the given functionspace and returns
769 the result as a Data object.
770 *
771 */
772 ESCRIPT_DLL_API
773 Data
774 interpolate(const FunctionSpace& functionspace) const;
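// A sketch of interpolating onto another function space. "targetFS" is assumed
// to be a FunctionSpace on the same domain; probeInterpolation() (declared
// below) can be used to check that the interpolation is available.
//
//   if (d.probeInterpolation(targetFS)) {
//       escript::Data di = d.interpolate(targetFS);
//   }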
775
776
777 ESCRIPT_DLL_API
778 Data
779 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
780 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
781
782 ESCRIPT_DLL_API
783 Data
784 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
785 double undef,bool check_boundaries);
786
787
788
789
790 ESCRIPT_DLL_API
791 Data
792 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
793 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
794
795 ESCRIPT_DLL_API
796 Data
797 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
798 double undef,bool check_boundaries);
799
800 /**
801 \brief
802 Calculates the gradient of the data at the data points of functionspace.
803 If functionspace is not present the function space of Function(getDomain()) is used.
804 *
805 */
806 ESCRIPT_DLL_API
807 Data
808 gradOn(const FunctionSpace& functionspace) const;
809
810 ESCRIPT_DLL_API
811 Data
812 grad() const;
813
814 /**
815 \brief
816 Calculate the integral over the function space domain as a python tuple.
817 */
818 ESCRIPT_DLL_API
819 boost::python::object
820 integrateToTuple_const() const;
821
822
823 /**
824 \brief
825 Calculate the integral over the function space domain as a python tuple.
826 */
827 ESCRIPT_DLL_API
828 boost::python::object
829 integrateToTuple();
830
831
832
833 /**
834 \brief
835 Return 1./ of each data point of this Data object (the element-wise reciprocal).
836 *
837 */
838 ESCRIPT_DLL_API
839 Data
840 oneOver() const;
841 /**
842 \brief
843 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
844 *
845 */
846 ESCRIPT_DLL_API
847 Data
848 wherePositive() const;
849
850 /**
851 \brief
852 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
853 *
854 */
855 ESCRIPT_DLL_API
856 Data
857 whereNegative() const;
858
859 /**
860 \brief
861 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
862 *
863 */
864 ESCRIPT_DLL_API
865 Data
866 whereNonNegative() const;
867
868 /**
869 \brief
870 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
871 *
872 */
873 ESCRIPT_DLL_API
874 Data
875 whereNonPositive() const;
876
877 /**
878 \brief
879 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
880 *
881 */
882 ESCRIPT_DLL_API
883 Data
884 whereZero(double tol=0.0) const;
885
886 /**
887 \brief
888 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
889 *
890 */
891 ESCRIPT_DLL_API
892 Data
893 whereNonZero(double tol=0.0) const;
894
895 /**
896 \brief
897 Return the maximum absolute value of this Data object.
898
899 The method is not const because lazy data needs to be expanded before Lsup can be computed.
900 The _const form can be used when the Data object is const, however this will only work for
901 Data which is not Lazy.
902
903 For Data which contain no samples (or tagged Data for which no tags in use have a value)
904 zero is returned.
905 */
906 ESCRIPT_DLL_API
907 double
908 Lsup();
909
910 ESCRIPT_DLL_API
911 double
912 Lsup_const() const;
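// A short sketch contrasting the two forms: the non-const form may resolve
// lazy data first, while the _const form requires the Data to already be
// non-lazy ("d" is an illustrative Data object).
//
//   double a = d.Lsup();             // may resolve lazy data before computing
//   const escript::Data& cd = d;
//   double b = cd.Lsup_const();      // only valid if cd is not lazy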
913
914
915 /**
916 \brief
917 Return the maximum value of this Data object.
918
919 The method is not const because lazy data needs to be expanded before sup can be computed.
920 The _const form can be used when the Data object is const, however this will only work for
921 Data which is not Lazy.
922
923 For Data which contain no samples (or tagged Data for which no tags in use have a value)
924 a large negative value is returned.
925 */
926 ESCRIPT_DLL_API
927 double
928 sup();
929
930 ESCRIPT_DLL_API
931 double
932 sup_const() const;
933
934
935 /**
936 \brief
937 Return the minimum value of this Data object.
938
939 The method is not const because lazy data needs to be expanded before inf can be computed.
940 The _const form can be used when the Data object is const, however this will only work for
941 Data which is not Lazy.
942
943 For Data which contain no samples (or tagged Data for which no tags in use have a value)
944 a large positive value is returned.
945 */
946 ESCRIPT_DLL_API
947 double
948 inf();
949
950 ESCRIPT_DLL_API
951 double
952 inf_const() const;
953
954
955
956 /**
957 \brief
958 Return the absolute value of each data point of this Data object.
959 *
960 */
961 ESCRIPT_DLL_API
962 Data
963 abs() const;
964
965 /**
966 \brief
967 Return the maximum value of each data point of this Data object.
968 *
969 */
970 ESCRIPT_DLL_API
971 Data
972 maxval() const;
973
974 /**
975 \brief
976 Return the minimum value of each data point of this Data object.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 minval() const;
982
983 /**
984 \brief
985 Return the (sample number, data-point number) of the data point with
986 the minimum component value in this Data object.
987 \note If you are working in python, please consider using Locator
988 instead of manually manipulating process and point IDs.
989 */
990 ESCRIPT_DLL_API
991 const boost::python::tuple
992 minGlobalDataPoint() const;
993
994 /**
995 \brief
996 Return the (sample number, data-point number) of the data point with
997 the maximum component value in this Data object.
998 \note If you are working in python, please consider using Locator
999 instead of manually manipulating process and point IDs.
1000 */
1001 ESCRIPT_DLL_API
1002 const boost::python::tuple
1003 maxGlobalDataPoint() const;
1004
1005
1006
1007 /**
1008 \brief
1009 Return the sign of each data point of this Data object.
1010 -1 for negative values, zero for zero values, 1 for positive values.
1011 *
1012 */
1013 ESCRIPT_DLL_API
1014 Data
1015 sign() const;
1016
1017 /**
1018 \brief
1019 Return the symmetric part of a matrix which is half the matrix plus its transpose.
1020 *
1021 */
1022 ESCRIPT_DLL_API
1023 Data
1024 symmetric() const;
1025
1026 /**
1027 \brief
1028 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
1029 *
1030 */
1031 ESCRIPT_DLL_API
1032 Data
1033 nonsymmetric() const;
1034
1035 /**
1036 \brief
1037 Return the trace of a matrix
1038 *
1039 */
1040 ESCRIPT_DLL_API
1041 Data
1042 trace(int axis_offset) const;
1043
1044 /**
1045 \brief
1046 Transpose each data point of this Data object around the given axis.
1047 *
1048 */
1049 ESCRIPT_DLL_API
1050 Data
1051 transpose(int axis_offset) const;
1052
1053 /**
1054 \brief
1055 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1056 Currently this function is restricted to rank 2, square shape, and dimension 3.
1057 *
1058 */
1059 ESCRIPT_DLL_API
1060 Data
1061 eigenvalues() const;
1062
1063 /**
1064 \brief
1065 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1066 The eigenvalues are ordered in increasing size; eigenvalues with a relative difference less than
1067 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1068 first non-zero entry is positive.
1069 Currently this function is restricted to rank 2, square shape, and dimension 3.
1070 *
1071 */
1072 ESCRIPT_DLL_API
1073 const boost::python::tuple
1074 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1075
1076 /**
1077 \brief
1078 swaps the components axis0 and axis1
1079 *
1080 */
1081 ESCRIPT_DLL_API
1082 Data
1083 swapaxes(const int axis0, const int axis1) const;
1084
1085 /**
1086 \brief
1087 Return the error function erf of each data point of this Data object.
1088 *
1089 */
1090 ESCRIPT_DLL_API
1091 Data
1092 erf() const;
1093
1094 /**
1095 \brief
1096 Return the sin of each data point of this Data object.
1097 *
1098 */
1099 ESCRIPT_DLL_API
1100 Data
1101 sin() const;
1102
1103 /**
1104 \brief
1105 Return the cos of each data point of this Data object.
1106 *
1107 */
1108 ESCRIPT_DLL_API
1109 Data
1110 cos() const;
1111
1112 /**
1113 \brief
1114 Return the tan of each data point of this Data object.
1115 *
1116 */
1117 ESCRIPT_DLL_API
1118 Data
1119 tan() const;
1120
1121 /**
1122 \brief
1123 Return the asin of each data point of this Data object.
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 Data
1128 asin() const;
1129
1130 /**
1131 \brief
1132 Return the acos of each data point of this Data object.
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 acos() const;
1138
1139 /**
1140 \brief
1141 Return the atan of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 atan() const;
1147
1148 /**
1149 \brief
1150 Return the sinh of each data point of this Data object.
1151 *
1152 */
1153 ESCRIPT_DLL_API
1154 Data
1155 sinh() const;
1156
1157 /**
1158 \brief
1159 Return the cosh of each data point of this Data object.
1160 *
1161 */
1162 ESCRIPT_DLL_API
1163 Data
1164 cosh() const;
1165
1166 /**
1167 \brief
1168 Return the tanh of each data point of this Data object.
1169 *
1170 */
1171 ESCRIPT_DLL_API
1172 Data
1173 tanh() const;
1174
1175 /**
1176 \brief
1177 Return the asinh of each data point of this Data object.
1178 *
1179 */
1180 ESCRIPT_DLL_API
1181 Data
1182 asinh() const;
1183
1184 /**
1185 \brief
1186 Return the acosh of each data point of this Data object.
1187 *
1188 */
1189 ESCRIPT_DLL_API
1190 Data
1191 acosh() const;
1192
1193 /**
1194 \brief
1195 Return the atanh of each data point of this Data object.
1196 *
1197 */
1198 ESCRIPT_DLL_API
1199 Data
1200 atanh() const;
1201
1202 /**
1203 \brief
1204 Return the log to base 10 of each data point of this Data object.
1205 *
1206 */
1207 ESCRIPT_DLL_API
1208 Data
1209 log10() const;
1210
1211 /**
1212 \brief
1213 Return the natural log of each data point of this Data object.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 log() const;
1219
1220 /**
1221 \brief
1222 Return the exponential function of each data point of this Data object.
1223 *
1224 */
1225 ESCRIPT_DLL_API
1226 Data
1227 exp() const;
1228
1229 /**
1230 \brief
1231 Return the square root of each data point of this Data object.
1232 *
1233 */
1234 ESCRIPT_DLL_API
1235 Data
1236 sqrt() const;
1237
1238 /**
1239 \brief
1240 Return the negation of each data point of this Data object.
1241 *
1242 */
1243 ESCRIPT_DLL_API
1244 Data
1245 neg() const;
1246
1247 /**
1248 \brief
1249 Return the identity of each data point of this Data object.
1250 Simply returns this object unmodified.
1251 *
1252 */
1253 ESCRIPT_DLL_API
1254 Data
1255 pos() const;
1256
1257 /**
1258 \brief
1259 Return the given power of each data point of this Data object.
1260
1261 \param right Input - the power to raise the object to.
1262 *
1263 */
1264 ESCRIPT_DLL_API
1265 Data
1266 powD(const Data& right) const;
1267
1268 /**
1269 \brief
1270 Return each data point of this Data object raised to the given power (a python object).
1271
1272 \param right Input - the power to raise the object to.
1273 *
1274 */
1275 ESCRIPT_DLL_API
1276 Data
1277 powO(const boost::python::object& right) const;
1278
1279 /**
1280 \brief
1281 Return the given base (a python object) raised to the power of each data point of this Data object.
1282
1283 \param left Input - the bases
1284 *
1285 */
1286
1287 ESCRIPT_DLL_API
1288 Data
1289 rpowO(const boost::python::object& left) const;
1290
1291 /**
1292 \brief
1293 writes the object to a file in the DX file format
1294 */
1295 ESCRIPT_DLL_API
1296 void
1297 saveDX(std::string fileName) const;
1298
1299 /**
1300 \brief
1301 writes the object to a file in the VTK file format
1302 */
1303 ESCRIPT_DLL_API
1304 void
1305 saveVTK(std::string fileName) const;
1306
1307
1308
1309 /**
1310 \brief
1311 Overloaded operator +=
1312 \param right - Input - The right hand side.
1313 *
1314 */
1315 ESCRIPT_DLL_API
1316 Data& operator+=(const Data& right);
1317 ESCRIPT_DLL_API
1318 Data& operator+=(const boost::python::object& right);
1319
1320 ESCRIPT_DLL_API
1321 Data& operator=(const Data& other);
1322
1323 /**
1324 \brief
1325 Overloaded operator -=
1326 \param right - Input - The right hand side.
1327 *
1328 */
1329 ESCRIPT_DLL_API
1330 Data& operator-=(const Data& right);
1331 ESCRIPT_DLL_API
1332 Data& operator-=(const boost::python::object& right);
1333
1334 /**
1335 \brief
1336 Overloaded operator *=
1337 \param right - Input - The right hand side.
1338 *
1339 */
1340 ESCRIPT_DLL_API
1341 Data& operator*=(const Data& right);
1342 ESCRIPT_DLL_API
1343 Data& operator*=(const boost::python::object& right);
1344
1345 /**
1346 \brief
1347 Overloaded operator /=
1348 \param right - Input - The right hand side.
1349 *
1350 */
1351 ESCRIPT_DLL_API
1352 Data& operator/=(const Data& right);
1353 ESCRIPT_DLL_API
1354 Data& operator/=(const boost::python::object& right);
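// A sketch of the in-place operators declared above; "a" and "b" are assumed
// to be compatible Data objects (same or interpolatable function spaces and
// matching data point shapes).
//
//   a += b;    // element-wise addition
//   a *= b;    // element-wise multiplication
//   a /= b;    // element-wise division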
1355
1356 /**
1357 \brief
1358 Returns true if this can be interpolated to functionspace.
1359 */
1360 ESCRIPT_DLL_API
1361 bool
1362 probeInterpolation(const FunctionSpace& functionspace) const;
1363
1364 /**
1365 Data object slicing methods.
1366 */
1367
1368 /**
1369 \brief
1370 Returns a slice from this Data object.
1371
1372 Description:
1373 Implements the [] get operator in python.
1374 Calls getSlice.
1375
1376 \param key - Input - python slice tuple specifying
1377 slice to return.
1378 */
1379 ESCRIPT_DLL_API
1380 Data
1381 getItem(const boost::python::object& key) const;
1382
1383 /**
1384 \brief
1385 Copies slice from value into this Data object.
1386
1387 Implements the [] set operator in python.
1388 Calls setSlice.
1389
1390 \param key - Input - python slice tuple specifying
1391 slice to copy from value.
1392 \param value - Input - Data object to copy from.
1393 */
1394 ESCRIPT_DLL_API
1395 void
1396 setItemD(const boost::python::object& key,
1397 const Data& value);
1398
1399 ESCRIPT_DLL_API
1400 void
1401 setItemO(const boost::python::object& key,
1402 const boost::python::object& value);
1403
1404 // The following public methods should be treated as private.
1405
1406 /**
1407 \brief
1408 Perform the given unary operation on every element of every data point in
1409 this Data object.
1410 */
1411 template <class UnaryFunction>
1412 ESCRIPT_DLL_API
1413 inline
1414 void
1415 unaryOp2(UnaryFunction operation);
1416
1417 /**
1418 \brief
1419 Return a Data object containing the specified slice of
1420 this Data object.
1421 \param region - Input - Region to copy.
1422 *
1423 */
1424 ESCRIPT_DLL_API
1425 Data
1426 getSlice(const DataTypes::RegionType& region) const;
1427
1428 /**
1429 \brief
1430 Copy the specified slice from the given value into this
1431 Data object.
1432 \param value - Input - Data to copy from.
1433 \param region - Input - Region to copy.
1434 *
1435 */
1436 ESCRIPT_DLL_API
1437 void
1438 setSlice(const Data& value,
1439 const DataTypes::RegionType& region);
1440
1441 /**
1442 \brief
1443 print the data values to stdout. Used for debugging
1444 */
1445 ESCRIPT_DLL_API
1446 void
1447 print(void);
1448
1449 /**
1450 \brief
1451 return the MPI rank number of the local data
1452 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1453 is returned
1454 */
1455 ESCRIPT_DLL_API
1456 int
1457 get_MPIRank(void) const;
1458
1459 /**
1460 \brief
1461 return the number of MPI processes for the local data
1462 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1463 is returned
1464 */
1465 ESCRIPT_DLL_API
1466 int
1467 get_MPISize(void) const;
1468
1469 /**
1470 \brief
1471 return the MPI communicator of the local data
1472 MPI_COMM_WORLD is assumed and returned.
1473 */
1474 ESCRIPT_DLL_API
1475 MPI_Comm
1476 get_MPIComm(void) const;
1477
1478 /**
1479 \brief
1480 return the object produced by the factory, which is a DataConstant or DataExpanded
1481 TODO Ownership of this object should be explained in the documentation.
1482 */
1483 ESCRIPT_DLL_API
1484 DataAbstract*
1485 borrowData(void) const;
1486
1487 ESCRIPT_DLL_API
1488 DataAbstract_ptr
1489 borrowDataPtr(void) const;
1490
1491 ESCRIPT_DLL_API
1492 DataReady_ptr
1493 borrowReadyPtr(void) const;
1494
1495
1496
1497 /**
1498 \brief
1499 Return a pointer to the beginning of the datapoint at the specified offset.
1500 TODO Eventually these should be inlined.
1501 \param i - position(offset) in the underlying datastructure
1502 */
1503
1504 ESCRIPT_DLL_API
1505 DataTypes::ValueType::const_reference
1506 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1507
1508
1509 ESCRIPT_DLL_API
1510 DataTypes::ValueType::reference
1511 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1512
1513
1514
1515 /**
1516 \brief Create a buffer for use by getSampleDataRO
1517 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1518 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1519
1520 In multi-threaded sections, this needs to be called on each thread.
1521
1522 \return A BufferGroup* if Data is lazy, NULL otherwise.
1523 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1524 */
1525 ESCRIPT_DLL_API
1526 BufferGroup*
1527 allocSampleBuffer() const;
1528
1529 /**
1530 \brief Free a buffer allocated with allocSampleBuffer.
1531 \param buffer Input - pointer to the buffer to deallocate.
1532 */
1533 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
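// A sketch of the read-only access pattern for possibly-lazy data using the
// buffer management declared above ("d" is an illustrative Data object; in
// OpenMP sections each thread would allocate its own buffer).
//
//   escript::BufferGroup* buf = d.allocSampleBuffer();     // NULL if d is not lazy
//   for (int s = 0; s < d.getNumSamples(); ++s) {
//       const double* p = d.getSampleDataRO(s, buf);
//       // ... read the sample values through p ...
//   }
//   d.freeSampleBuffer(buf);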
1534
1535 protected:
1536
1537 private:
1538
1539 template <class BinaryOp>
1540 double
1541 lazyAlgWorker(double init, int mpiop_type);
1542
1543 double
1544 LsupWorker() const;
1545
1546 double
1547 supWorker() const;
1548
1549 double
1550 infWorker() const;
1551
1552 boost::python::object
1553 integrateWorker() const;
1554
1555 void
1556 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1557
1558 void
1559 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1560
1561
1562 /**
1563 \brief
1564 Check *this and the right operand are compatible. Throws
1565 an exception if they aren't.
1566 \param right - Input - The right hand side.
1567 */
1568 inline
1569 void
1570 operandCheck(const Data& right) const
1571 {
1572 return m_data->operandCheck(*(right.m_data.get()));
1573 }
1574
1575 /**
1576 \brief
1577 Perform the specified reduction algorithm on every element of every data point in
1578 this Data object according to the given function and return the single value result.
1579 */
1580 template <class BinaryFunction>
1581 inline
1582 double
1583 algorithm(BinaryFunction operation,
1584 double initial_value) const;
1585
1586 /**
1587 \brief
1588 Reduce each data-point in this Data object using the given operation. Return a Data
1589 object with the same number of data-points, but with each data-point containing only
1590 one value - the result of the reduction operation on the corresponding data-point in
1591 this Data object
1592 */
1593 template <class BinaryFunction>
1594 inline
1595 Data
1596 dp_algorithm(BinaryFunction operation,
1597 double initial_value) const;
1598
1599 /**
1600 \brief
1601 Perform the given binary operation on all of the data's elements.
1602 The underlying type of the right hand side (right) determines the final
1603 type of *this after the operation. For example if the right hand side
1604 is expanded *this will be expanded if necessary.
1605 RHS is a Data object.
1606 */
1607 template <class BinaryFunction>
1608 inline
1609 void
1610 binaryOp(const Data& right,
1611 BinaryFunction operation);
1612
1613 /**
1614 \brief
1615 Convert the data type of the RHS to match this.
1616 \param right - Input - data type to match.
1617 */
1618 void
1619 typeMatchLeft(Data& right) const;
1620
1621 /**
1622 \brief
1623 Convert the data type of this to match the RHS.
1624 \param right - Input - data type to match.
1625 */
1626 void
1627 typeMatchRight(const Data& right);
1628
1629 /**
1630 \brief
1631 Construct a Data object of the appropriate type.
1632 */
1633
1634 void
1635 initialise(const DataTypes::ValueType& value,
1636 const DataTypes::ShapeType& shape,
1637 const FunctionSpace& what,
1638 bool expanded);
1639
1640 void
1641 initialise(const WrappedArray& value,
1642 const FunctionSpace& what,
1643 bool expanded);
1644
1645 //
1646 // flag to protect the data object against any update
1647 bool m_protected;
1648 mutable bool m_shared;
1649 bool m_lazy;
1650
1651 //
1652 // pointer to the actual data object
1653 // boost::shared_ptr<DataAbstract> m_data;
1654 DataAbstract_ptr m_data;
1655
1656 // If possible please use getReadyPtr instead.
1657 // But see warning below.
1658 const DataReady*
1659 getReady() const;
1660
1661 DataReady*
1662 getReady();
1663
1664
1665 // Be wary of using this for local operations since it (temporarily) increases reference count.
1666 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1667 // getReady() instead
1668 DataReady_ptr
1669 getReadyPtr();
1670
1671 const_DataReady_ptr
1672 getReadyPtr() const;
1673
1674
1675 /**
1676 \brief Update the Data's shared flag
1677 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1678 For internal use only.
1679 */
1680 void updateShareStatus(bool nowshared) const
1681 {
1682 m_shared=nowshared; // m_shared is mutable
1683 }
1684
1685 // In the isShared() method below:
1686 // A problem would occur if m_data (the address pointed to) were being modified
1687 // while the call m_data->is_shared is being executed.
1688 //
1689 // Q: So why do I think this code can be thread safe/correct?
1690 // A: We need to make some assumptions.
1691 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1692 // 2. We assume that no constructions or assignments which will share previously unshared
1693 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1694 //
1695 // This means that the only transition we need to consider, is when a previously shared object is
1696 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1697 // In those cases the m_shared flag changes to false after m_data has completed changing.
1698 // For any threads executing before the flag switches they will assume the object is still shared.
1699 bool isShared() const
1700 {
1701 return m_shared;
1702 /* if (m_shared) return true;
1703 if (m_data->isShared())
1704 {
1705 updateShareStatus(true);
1706 return true;
1707 }
1708 return false;*/
1709 }
1710
1711 void forceResolve()
1712 {
1713 if (isLazy())
1714 {
1715 #ifdef _OPENMP
1716 if (omp_in_parallel())
1717 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1718 throw DataException("Please do not call forceResolve() in a parallel region.");
1719 }
1720 #endif
1721 resolve();
1722 }
1723 }
1724
1725 /**
1726 \brief If another object is sharing our member data, make a copy to work with instead.
1727 This code should only be called from single threaded sections of code.
1728 */
1729 void exclusiveWrite()
1730 {
1731 #ifdef _OPENMP
1732 if (omp_in_parallel())
1733 {
1734 // *((int*)0)=17;
1735 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1736 }
1737 #endif
1738 forceResolve();
1739 if (isShared())
1740 {
1741 DataAbstract* t=m_data->deepCopy();
1742 set_m_data(DataAbstract_ptr(t));
1743 }
1744 }
1745
1746 /**
1747 \brief checks if caller can have exclusive write to the object
1748 */
1749 void checkExclusiveWrite()
1750 {
1751 if (isLazy() || isShared())
1752 {
1753 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1754 }
1755 }
1756
1757 /**
1758 \brief Modify the data abstract hosted by this Data object
1759 For internal use only.
1760 Passing a pointer to null is permitted (do this in the destructor)
1761 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1762 */
1763 void set_m_data(DataAbstract_ptr p);
1764
1765 friend class DataAbstract; // To allow calls to updateShareStatus
1766
1767 };
1768
1769 } // end namespace escript
1770
1771
1772 // No, this is not supposed to be at the top of the file
1773 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1774 // so that I can dynamic cast between them below.
1775 #include "DataReady.h"
1776 #include "DataLazy.h"
1777
1778 namespace escript
1779 {
1780
1781 inline
1782 const DataReady*
1783 Data::getReady() const
1784 {
1785 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1786 EsysAssert((dr!=0), "Error - casting to DataReady.");
1787 return dr;
1788 }
1789
1790 inline
1791 DataReady*
1792 Data::getReady()
1793 {
1794 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1795 EsysAssert((dr!=0), "Error - casting to DataReady.");
1796 return dr;
1797 }
1798
1799 // Be wary of using this for local operations since it (temporarily) increases reference count.
1800 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1801 // getReady() instead
1802 inline
1803 DataReady_ptr
1804 Data::getReadyPtr()
1805 {
1806 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1807 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1808 return dr;
1809 }
1810
1811
1812 inline
1813 const_DataReady_ptr
1814 Data::getReadyPtr() const
1815 {
1816 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1817 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1818 return dr;
1819 }
1820
1821 inline
1822 DataAbstract::ValueType::value_type*
1823 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1824 {
1825 if (isLazy())
1826 {
1827 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1828 }
1829 return getReady()->getSampleDataRW(sampleNo);
1830 }
1831
1832 inline
1833 const DataAbstract::ValueType::value_type*
1834 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1835 {
1836 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1837 if (l!=0)
1838 {
1839 size_t offset=0;
1840 if (bufferg==NULL)
1841 {
1842 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1843 }
1844 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1845 return &((*res)[offset]);
1846 }
1847 return getReady()->getSampleDataRO(sampleNo);
1848 }
1849
1850
1851
1852 /**
1853 Modify a filename for MPI parallel output to multiple files
1854 */
1855 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1856
1857 /**
1858 Binary Data object operators.
1859 */
1860 inline double rpow(double x,double y)
1861 {
1862 return pow(y,x);
1863 }
1864
1865 /**
1866 \brief
1867 Operator+
1868 Takes two Data objects.
1869 */
1870 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1871
1872 /**
1873 \brief
1874 Operator-
1875 Takes two Data objects.
1876 */
1877 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1878
1879 /**
1880 \brief
1881 Operator*
1882 Takes two Data objects.
1883 */
1884 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1885
1886 /**
1887 \brief
1888 Operator/
1889 Takes two Data objects.
1890 */
1891 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1892
1893 /**
1894 \brief
1895 Operator+
1896 Takes LHS Data object and RHS python::object.
1897 python::object must be convertible to Data type.
1898 */
1899 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1900
1901 /**
1902 \brief
1903 Operator-
1904 Takes LHS Data object and RHS python::object.
1905 python::object must be convertible to Data type.
1906 */
1907 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1908
1909 /**
1910 \brief
1911 Operator*
1912 Takes LHS Data object and RHS python::object.
1913 python::object must be convertible to Data type.
1914 */
1915 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1916
1917 /**
1918 \brief
1919 Operator/
1920 Takes LHS Data object and RHS python::object.
1921 python::object must be convertible to Data type.
1922 */
1923 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1924
1925 /**
1926 \brief
1927 Operator+
1928 Takes LHS python::object and RHS Data object.
1929 python::object must be convertible to Data type.
1930 */
1931 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1932
1933 /**
1934 \brief
1935 Operator-
1936 Takes LHS python::object and RHS Data object.
1937 python::object must be convertible to Data type.
1938 */
1939 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1940
1941 /**
1942 \brief
1943 Operator*
1944 Takes LHS python::object and RHS Data object.
1945 python::object must be convertible to Data type.
1946 */
1947 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1948
1949 /**
1950 \brief
1951 Operator/
1952 Takes LHS python::object and RHS Data object.
1953 python::object must be convertible to Data type.
1954 */
1955 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1956
1957
1958
1959 /**
1960 \brief
1961 Output operator
1962 */
1963 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1964
1965 /**
1966 \brief
1967 Compute a tensor product of two Data objects
1968 \param arg_0 - Input - Data object
1969 \param arg_1 - Input - Data object
1970 \param axis_offset - Input - axis offset
1971 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1972 */
1973 ESCRIPT_DLL_API
1974 Data
1975 C_GeneralTensorProduct(Data& arg_0,
1976 Data& arg_1,
1977 int axis_offset=0,
1978 int transpose=0);
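// An illustrative call (the interpretation of axis_offset as the number of
// contracted axes, e.g. 1 for a matrix-vector style product, is an assumption
// not stated in this header; "m" and "v" are illustrative Data objects with
// compatible shapes on the same function space).
//
//   escript::Data r = escript::C_GeneralTensorProduct(m, v, 1, 0);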
1979
1980 /**
1981 \brief
1982 Perform the given binary operation with this and right as operands.
1983 Right is a Data object.
1984 */
1985 template <class BinaryFunction>
1986 inline
1987 void
1988 Data::binaryOp(const Data& right,
1989 BinaryFunction operation)
1990 {
1991 //
1992 // updating a rank-zero object from a higher-rank RHS is not permitted
1993 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1994 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1995 }
1996
1997 if (isLazy() || right.isLazy())
1998 {
1999 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2000 }
2001 //
2002 // initially make the temporary a shallow copy
2003 Data tempRight(right);
2004
2005 if (getFunctionSpace()!=right.getFunctionSpace()) {
2006 if (right.probeInterpolation(getFunctionSpace())) {
2007 //
2008 // an interpolation is required so create a new Data
2009 tempRight=Data(right,this->getFunctionSpace());
2010 } else if (probeInterpolation(right.getFunctionSpace())) {
2011 //
2012 // interpolate onto the RHS function space
2013 Data tempLeft(*this,right.getFunctionSpace());
2014 // m_data=tempLeft.m_data;
2015 set_m_data(tempLeft.m_data);
2016 }
2017 }
2018 operandCheck(tempRight);
2019 //
2020 // ensure this has the right type for the RHS
2021 typeMatchRight(tempRight);
2022 //
2023 // Need to cast to the concrete types so that the correct binaryOp
2024 // is called.
2025 if (isExpanded()) {
2026 //
2027 // Expanded data will be done in parallel, the right hand side can be
2028 // of any data type
2029 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2030 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2031 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2032 } else if (isTagged()) {
2033 //
2034 // Tagged data is operated on serially, the right hand side can be
2035 // either DataConstant or DataTagged
2036 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2037 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2038 if (right.isTagged()) {
2039 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2040 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2041 escript::binaryOp(*leftC,*rightC,operation);
2042 } else {
2043 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2044 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2045 escript::binaryOp(*leftC,*rightC,operation);
2046 }
2047 } else if (isConstant()) {
2048 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2049 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2050 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2051 escript::binaryOp(*leftC,*rightC,operation);
2052 }
2053 }
2054
2055 /**
2056 \brief
2057 Perform the given Data object reduction algorithm on this and return the result.
2058 Given operation combines each element of each data point, thus argument
2059 object (*this) is a rank n Data object, and returned object is a scalar.
2060 Calls escript::algorithm.
2061 */
2062 template <class BinaryFunction>
2063 inline
2064 double
2065 Data::algorithm(BinaryFunction operation, double initial_value) const
2066 {
2067 if (isExpanded()) {
2068 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2069 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2070 return escript::algorithm(*leftC,operation,initial_value);
2071 } else if (isTagged()) {
2072 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2073 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2074 return escript::algorithm(*leftC,operation,initial_value);
2075 } else if (isConstant()) {
2076 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2077 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2078 return escript::algorithm(*leftC,operation,initial_value);
2079 } else if (isEmpty()) {
2080 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2081 } else if (isLazy()) {
2082 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2083 } else {
2084 throw DataException("Error - Data encapsulates an unknown type.");
2085 }
2086 }
2087
2088 /**
2089 \brief
2090 Perform the given data point reduction algorithm on data and return the result.
2091 Given operation combines each element within each data point into a scalar,
2092 thus argument object is a rank n Data object, and returned object is a
2093 rank 0 Data object.
2094 Calls escript::dp_algorithm.
2095 */
2096 template <class BinaryFunction>
2097 inline
2098 Data
2099 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2100 {
2101 if (isEmpty()) {
2102 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2103 }
2104 else if (isExpanded()) {
2105 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2106 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2107 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2108 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2109 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2110 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2111 return result;
2112 }
2113 else if (isTagged()) {
2114 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2115 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2116 DataTypes::ValueType defval(1);
2117 defval[0]=0;
2118 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2119 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2120 return Data(resultT); // note: the Data object now owns the resultT pointer
2121 }
2122 else if (isConstant()) {
2123 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2124 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2125 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2126 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2127 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2128 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2129 return result;
2130 } else if (isLazy()) {
2131 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2132 } else {
2133 throw DataException("Error - Data encapsulates an unknown type.");
2134 }
2135 }
2136
2137 /**
2138 \brief
2139 Compute a tensor operation with two Data objects
2140 \param arg_0 - Input - Data object
2141 \param arg_1 - Input - Data object
2142 \param operation - Input - Binary op functor
2143 */
2144 template <typename BinaryFunction>
2145 inline
2146 Data
2147 C_TensorBinaryOperation(Data const &arg_0,
2148 Data const &arg_1,
2149 BinaryFunction operation)
2150 {
2151 if (arg_0.isEmpty() || arg_1.isEmpty())
2152 {
2153 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2154 }
2155 if (arg_0.isLazy() || arg_1.isLazy())
2156 {
2157 throw DataException("Error - Operations not permitted on lazy data.");
2158 }
2159 // Interpolate if necessary and find an appropriate function space
2160 Data arg_0_Z, arg_1_Z;
2161 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2162 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2163 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2164 arg_1_Z = Data(arg_1);
2165 }
2166 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2167 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2168 arg_0_Z =Data(arg_0);
2169 }
2170 else {
2171 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2172 }
2173 } else {
2174 arg_0_Z = Data(arg_0);
2175 arg_1_Z = Data(arg_1);
2176 }
2177 // Get rank and shape of inputs
2178 int rank0 = arg_0_Z.getDataPointRank();
2179 int rank1 = arg_1_Z.getDataPointRank();
2180 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2181 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2182 int size0 = arg_0_Z.getDataPointSize();
2183 int size1 = arg_1_Z.getDataPointSize();
2184 // Declare output Data object
2185 Data res;
2186
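// Dispatch structure: equal shapes are combined pointwise; a rank-0 (scalar) argument
// on either side is broadcast against the other argument; within each case the storage
// kinds (constant/tagged/expanded) of the two inputs select the loop that is used.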
2187 if (shape0 == shape1) {
2188 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2189 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2190 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2191 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2192 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2193
2194 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2195 }
2196 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2197
2198 // Prepare the DataConstant input
2199 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2200
2201 // Borrow DataTagged input from Data object
2202 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2203
2204 // Prepare a DataTagged output (tmp_2)
2205 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2206 res.tag();
2207 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2208
2209 // Prepare offset into DataConstant
2210 int offset_0 = tmp_0->getPointOffset(0,0);
2211 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2212
2213 // Get the pointers to the actual data
2214 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2215 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2216
2217 // Compute a result for the default
2218 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2219 // Compute a result for each tag
2220 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2221 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2222 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2223 tmp_2->addTag(i->first);
2224 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2225 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2226
2227 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2228 }
2229
2230 }
2231 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2232 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2233 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2234 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2235 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2236
2237 int sampleNo_1,dataPointNo_1;
2238 int numSamples_1 = arg_1_Z.getNumSamples();
2239 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2240 int offset_0 = tmp_0->getPointOffset(0,0);
2241 res.requireWrite();
2242 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2243 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2244 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2245 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2246 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2247 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2248 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2249 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2250 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2251 }
2252 }
2253
2254 }
2255 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2256 // Borrow DataTagged input from Data object
2257 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2258
2259 // Prepare the DataConstant input
2260 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2261
2262 // Prepare a DataTagged output (tmp_2)
2263 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2264 res.tag();
2265 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2266
2267 // Prepare offset into DataConstant
2268 int offset_1 = tmp_1->getPointOffset(0,0);
2269
2270 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2271 // Get the pointers to the actual data
2272 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2273 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2274 // Compute a result for the default
2275 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2276 // Compute a result for each tag
2277 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2278 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2279 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2280 tmp_2->addTag(i->first);
2281 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2282 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2283 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2284 }
2285
2286 }
2287 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2288 // Borrow DataTagged input from Data object
2289 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2290
2291 // Borrow DataTagged input from Data object
2292 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2293
2294 // Prepare a DataTagged output (tmp_2)
2295 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2296 res.tag(); // DataTagged output
2297 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2298
2299 // Get the pointers to the actual data
2300 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2301 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2302 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2303
2304 // Compute a result for the default
2305 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2306 // Merge the tags
2307 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2308 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2309 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2310 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2311 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2312 }
2313 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2314 tmp_2->addTag(i->first);
2315 }
2316 // Compute a result for each tag
2317 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2318 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2319
2320 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2321 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2322 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2323
2324 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2325 }
2326
2327 }
2328 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2329 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2330 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2331 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2332 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2333 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2334
2335 int sampleNo_0,dataPointNo_0;
2336 int numSamples_0 = arg_0_Z.getNumSamples();
2337 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2338 res.requireWrite();
2339 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2340 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2341 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all points of a tagged sample share the same offset, so use point 0
2342 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2343 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2344 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2345 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2346 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2347 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2348 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2349 }
2350 }
2351
2352 }
2353 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2354 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2355 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2356 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2357 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2358
2359 int sampleNo_0,dataPointNo_0;
2360 int numSamples_0 = arg_0_Z.getNumSamples();
2361 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2362 int offset_1 = tmp_1->getPointOffset(0,0);
2363 res.requireWrite();
2364 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2365 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2366 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2367 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2368 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2369
2370 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2371 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2372 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2373
2374
2375 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2376 }
2377 }
2378
2379 }
2380 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2381 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2382 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2383 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2384 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2385 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2386
2387 int sampleNo_0,dataPointNo_0;
2388 int numSamples_0 = arg_0_Z.getNumSamples();
2389 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2390 res.requireWrite();
2391 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2392 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2393 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2394 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2395 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2396 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2397 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2398 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2399 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2400 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2401 }
2402 }
2403
2404 }
2405 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2406 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2407 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2408 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2409 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2410 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2411
2412 int sampleNo_0,dataPointNo_0;
2413 int numSamples_0 = arg_0_Z.getNumSamples();
2414 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2415 res.requireWrite();
2416 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2417 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2418 dataPointNo_0=0;
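// The per-point loop below is deliberately commented out: all points of the sample are
// processed in one call (hence the length size0*numDataPointsPerSample_0), which assumes
// the data points of a sample are stored contiguously.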
2419 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2420 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2421 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2422 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2423 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2424 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2425 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2426 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2427 // }
2428 }
2429
2430 }
2431 else {
2432 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2433 }
2434
2435 } else if (0 == rank0) {
2436 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2437 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2438 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2439 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2440 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2441 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2442 }
2443 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2444
2445 // Prepare the DataConstant input
2446 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2447
2448 // Borrow DataTagged input from Data object
2449 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2450
2451 // Prepare a DataTagged output (tmp_2)
2452 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2453 res.tag();
2454 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2455
2456 // Prepare offset into DataConstant
2457 int offset_0 = tmp_0->getPointOffset(0,0);
2458 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2459
2460 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2461 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2462
2463 // Compute a result for the default
2464 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2465 // Compute a result for each tag
2466 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2467 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2468 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2469 tmp_2->addTag(i->first);
2470 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2471 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2472 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2473 }
2474
2475 }
2476 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2477
2478 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2479 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2480 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2481 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2482
2483 int sampleNo_1,dataPointNo_1;
2484 int numSamples_1 = arg_1_Z.getNumSamples();
2485 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2486 int offset_0 = tmp_0->getPointOffset(0,0);
2487 res.requireWrite();
2488 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2489 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2490 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2491 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2492 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2493 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2494 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2495 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2496 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2497
2498 }
2499 }
2500
2501 }
2502 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2503
2504 // Borrow DataTagged input from Data object
2505 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2506
2507 // Prepare the DataConstant input
2508 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2509
2510 // Prepare a DataTagged output (tmp_2)
2511 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2512 res.tag();
2513 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2514
2515 // Prepare offset into DataConstant
2516 int offset_1 = tmp_1->getPointOffset(0,0);
2517 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2518
2519 // Get the pointers to the actual data
2520 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2521 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2522
2523
2524 // Compute a result for the default
2525 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2526 // Compute a result for each tag
2527 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2528 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2529 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2530 tmp_2->addTag(i->first);
2531 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2532 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2533
2534 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2535 }
2536
2537 }
2538 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2539
2540 // Borrow DataTagged input from Data object
2541 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2542
2543 // Borrow DataTagged input from Data object
2544 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2545
2546 // Prepare a DataTagged output (tmp_2)
2547 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2548 res.tag(); // DataTagged output
2549 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2550
2551 // Get the pointers to the actual data
2552 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2553 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2554 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2555
2556 // Compute a result for the default
2557 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2558 // Merge the tags
2559 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2560 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2561 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2562 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2563 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2564 }
2565 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2566 tmp_2->addTag(i->first);
2567 }
2568 // Compute a result for each tag
2569 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2570 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2571 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2572 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2573 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2574
2575 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2576 }
2577
2578 }
2579 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2580
2581 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2582 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2583 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2584 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2585 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2586
2587 int sampleNo_0,dataPointNo_0;
2588 int numSamples_0 = arg_0_Z.getNumSamples();
2589 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2590 res.requireWrite();
2591 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2592 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2593 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all points of a tagged sample share the same offset, so use point 0
2594 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2595 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2596 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2597 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2598 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2599 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2600 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2601 }
2602 }
2603
2604 }
2605 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2606 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2607 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2608 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2609 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2610
2611 int sampleNo_0,dataPointNo_0;
2612 int numSamples_0 = arg_0_Z.getNumSamples();
2613 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2614 int offset_1 = tmp_1->getPointOffset(0,0);
2615 res.requireWrite();
2616 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2617 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2618 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2619 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2620 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2621 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2622 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2623 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2624 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2625 }
2626 }
2627
2628
2629 }
2630 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2631
2632 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2633 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2634 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2635 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2636 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2637
2638 int sampleNo_0,dataPointNo_0;
2639 int numSamples_0 = arg_0_Z.getNumSamples();
2640 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2641 res.requireWrite();
2642 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2643 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2644 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2645 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2646 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2647 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2648 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2649 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2650 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2651 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2652 }
2653 }
2654
2655 }
2656 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2657
2658 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2659 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2660 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2661 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2662 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2663
2664 int sampleNo_0,dataPointNo_0;
2665 int numSamples_0 = arg_0_Z.getNumSamples();
2666 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2667 res.requireWrite();
2668 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2669 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2670 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2671 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2672 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2673 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2674 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2675 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2676 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2677 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2678 }
2679 }
2680
2681 }
2682 else {
2683 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2684 }
2685
2686 } else if (0 == rank1) {
2687 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2688 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2689 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2690 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2691 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2692 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2693 }
2694 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2695
2696 // Prepare the DataConstant input
2697 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2698
2699 // Borrow DataTagged input from Data object
2700 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2701
2702 // Prepare a DataTagged output (tmp_2)
2703 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2704 res.tag();
2705 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2706
2707 // Prepare offset into DataConstant
2708 int offset_0 = tmp_0->getPointOffset(0,0);
2709 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2710
2711 // Get the pointers to the actual data
2712 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2713 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2714
2715 // Compute a result for the default
2716 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2717 // Compute a result for each tag
2718 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2719 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2720 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2721 tmp_2->addTag(i->first);
2722 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2723 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2724 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2725 }
2726 }
2727 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2728
2729 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2730 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2731 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2732 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2733
2734 int sampleNo_1,dataPointNo_1;
2735 int numSamples_1 = arg_1_Z.getNumSamples();
2736 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2737 int offset_0 = tmp_0->getPointOffset(0,0);
2738 res.requireWrite();
2739 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2740 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2741 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2742 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2743 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2744 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2745 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2746 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2747 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2748 }
2749 }
2750
2751 }
2752 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2753
2754 // Borrow DataTagged input from Data object
2755 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2756
2757 // Prepare the DataConstant input
2758 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2759
2760 // Prepare a DataTagged output (tmp_2)
2761 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2762 res.tag();
2763 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2764
2765 // Prepare offset into DataConstant
2766 int offset_1 = tmp_1->getPointOffset(0,0);
2767 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2768 // Get the pointers to the actual data
2769 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2770 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2771 // Compute a result for the default
2772 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2773 // Compute a result for each tag
2774 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2775 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2776 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2777 tmp_2->addTag(i->first);
2778 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2779 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2780 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2781 }
2782
2783 }
2784 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2785
2786 // Borrow DataTagged input from Data object
2787 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2788
2789 // Borrow DataTagged input from Data object
2790 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2791
2792 // Prepare a DataTagged output (tmp_2)
2793 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2794 res.tag(); // DataTagged output
2795 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2796
2797 // Get the pointers to the actual data
2798 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2799 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2800 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2801
2802 // Compute a result for the default
2803 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2804 // Merge the tags
2805 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2806 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2807 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2808 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2809 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2810 }
2811 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2812 tmp_2->addTag(i->first);
2813 }
2814 // Compute a result for each tag
2815 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2816 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2817 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2818 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2819 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2820 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2821 }
2822
2823 }
2824 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2825
2826 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2827 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2828 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2829 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2830 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2831
2832 int sampleNo_0,dataPointNo_0;
2833 int numSamples_0 = arg_0_Z.getNumSamples();
2834 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2835 res.requireWrite();
2836 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2837 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2838 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all points of a tagged sample share the same offset, so use point 0
2839 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2840 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2841 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2842 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2843 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2844 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2845 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2846 }
2847 }
2848
2849 }
2850 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2851 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2852 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2853 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2854 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2855
2856 int sampleNo_0,dataPointNo_0;
2857 int numSamples_0 = arg_0_Z.getNumSamples();
2858 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2859 int offset_1 = tmp_1->getPointOffset(0,0);
2860 res.requireWrite();
2861 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2862 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2863 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2864 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2865 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2866 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2867 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2868 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2869 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2870 }
2871 }
2872
2873
2874 }
2875 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2876
2877 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2878 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2879 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2880 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2881 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2882
2883 int sampleNo_0,dataPointNo_0;
2884 int numSamples_0 = arg_0_Z.getNumSamples();
2885 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2886 res.requireWrite();
2887 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2888 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2889 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2890 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2891 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2892 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2893 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2894 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2895 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2896 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2897 }
2898 }
2899
2900 }
2901 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2902
2903 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample.
2904 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2905 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2906 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2907 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2908
2909 int sampleNo_0,dataPointNo_0;
2910 int numSamples_0 = arg_0_Z.getNumSamples();
2911 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2912 res.requireWrite();
2913 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2914 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2915 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2916 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2917 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2918 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2919 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2920 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2921 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2922 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2923 }
2924 }
2925
2926 }
2927 else {
2928 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2929 }
2930
2931 } else {
2932 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2933 }
2934
2935 return res;
2936 }
2937
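/**
\brief
Compute a unary tensor operation applied pointwise to a Data object.
\param arg_0 - Input - Data object
\param operation - Input - Unary op functor or function pointer taking and returning double
\note A minimal usage sketch (illustrative only; x stands for an existing Data object and ::sin for any suitable unary function):
\code
Data y = C_TensorUnaryOperation(x, ::sin);
\endcode
*/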
2938 template <typename UnaryFunction>
2939 Data
2940 C_TensorUnaryOperation(Data const &arg_0,
2941 UnaryFunction operation)
2942 {
2943 if (arg_0.isEmpty()) // check this before doing any further work
2944 {
2945 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2946 }
2947 if (arg_0.isLazy())
2948 {
2949 throw DataException("Error - Operations not permitted on lazy data.");
2950 }
2951 // Take a shallow copy of the argument (no interpolation is needed for a single argument)
2952 Data arg_0_Z = Data(arg_0);
2953
2954 // Get the shape and size of the input
2955 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2956 int size0 = arg_0_Z.getDataPointSize();
2957
2958 // Declare output Data object
2959 Data res;
2960
2961 if (arg_0_Z.isConstant()) {
2962 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2963 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2964 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2965 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2966 }
2967 else if (arg_0_Z.isTagged()) {
2968
2969 // Borrow DataTagged input from Data object
2970 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2971
2972 // Prepare a DataTagged output 2
2973 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2974 res.tag();
2975 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2976
2977 // Get the pointers to the actual data
2978 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2979 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2980 // Compute a result for the default
2981 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2982 // Compute a result for each tag
2983 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2984 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2985 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2986 tmp_2->addTag(i->first);
2987 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2988 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2989 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2990 }
2991
2992 }
2993 else if (arg_0_Z.isExpanded()) {
2994
2995 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2996 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2997 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2998
2999 int sampleNo_0,dataPointNo_0;
3000 int numSamples_0 = arg_0_Z.getNumSamples();
3001 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3002 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3003 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3004 dataPointNo_0=0;
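// As in C_TensorBinaryOperation, the whole sample is processed in one call below;
// this assumes the data points of a sample are stored contiguously.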
3005 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3006 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3007 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3008 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3009 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3010 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3011 // }
3012 }
3013 }
3014 else {
3015 throw DataException("Error - C_TensorUnaryOperation: unknown input data type.");
3016 }
3017
3018 return res;
3019 }
3020
3021 }
3022 #endif
