/[escript]/trunk/escript/src/Data.h

Contents of /trunk/escript/src/Data.h

Revision 2521
Tue Jul 7 00:08:58 2009 UTC by jfenwick
File MIME type: text/plain
File size: 91481 byte(s)
Modified Lazy so that resolving a single sample uses the node cache method.
Fixed some doxygen problems.
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData onto "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
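  /* Usage sketch (illustrative, not from the original docs). Assumes fs is a valid
     FunctionSpace obtained elsewhere (e.g. via the usual factory functions) and that
     DataTypes::ShapeType behaves like a std::vector<int>.

         escript::Data s(1.5, DataTypes::ShapeType(), fs);   // scalar DataConstant
         DataTypes::ShapeType shp;
         shp.push_back(3);                                   // rank-1 shape with 3 components
         escript::Data v(0.0, shp, fs, true);                // expanded: one value per data point
  */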
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
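  /* Usage sketch (illustrative): delayed evaluation. resolve() is declared further below
     in this header.

         escript::Data lazyCopy = d.delay();   // wrap d's data in a lazy expression node
         d.delaySelf();                        // or convert d itself in place
         // ... build up further operations on d ...
         d.resolve();                          // force evaluation back to ready data
  */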
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true, if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type, convert this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
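  /* Usage sketch (illustrative): call requireWrite() once, outside any parallel region,
     then write samples in parallel via getSampleDataRW() (declared below).

         d.requireWrite();                        // resolve + un-share (single threaded!)
         #pragma omp parallel for
         for (int s = 0; s < d.getNumSamples(); ++s) {
             double* p = d.getSampleDataRW(s);    // one contiguous sample
             // ... modify p[0 .. getNumDataPointsPerSample()*getDataPointSize()-1] ...
         }
  */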
379
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for LazyData.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - A buffer to compute (and store) sample data in will be selected from this group.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687
688
689 /**
690 \brief
691 Assign the given value to the tag associated with name. Implicitly converts this
692 object to type DataTagged. Throws an exception if this object
693 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
694 \param name - Input - name of tag.
695 \param value - Input - Value to associate with given key.
696 */
697 ESCRIPT_DLL_API
698 void
699 setTaggedValueByName(std::string name,
700 const boost::python::object& value);
701
702 /**
703 \brief
704 Assign the given value to the tag. Implicitly converts this
705 object to type DataTagged if it is constant.
706
707 \param tagKey - Input - Integer key.
708 \param value - Input - Value to associate with given key.
709 *
710 */
711 ESCRIPT_DLL_API
712 void
713 setTaggedValue(int tagKey,
714 const boost::python::object& value);
715
716 /**
717 \brief
718 Assign the given value to the tag. Implicitly converts this
719 object to type DataTagged if it is constant.
720
721 \param tagKey - Input - Integer key.
722 \param pointshape - Input - The shape of the value parameter
723 \param value - Input - Value to associate with given key.
724 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
725 */
726 ESCRIPT_DLL_API
727 void
728 setTaggedValueFromCPP(int tagKey,
729 const DataTypes::ShapeType& pointshape,
730 const DataTypes::ValueType& value,
731 int dataOffset=0);
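  /* Usage sketch (illustrative). Assumes DataTypes::ValueType can be constructed with a
     size and indexed like a std::vector<double>; tag key 1 is an arbitrary example.

         escript::Data d(0.0, DataTypes::ShapeType(), fs);      // scalar constant
         d.tag();                                               // convert to DataTagged
         DataTypes::ValueType v(1);                             // storage for one scalar
         v[0] = 42.0;
         d.setTaggedValueFromCPP(1, DataTypes::ShapeType(), v); // value for tag 1
  */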
732
733
734
735 /**
736 \brief
737 Copy other Data object into this Data object where mask is positive.
738 */
739 ESCRIPT_DLL_API
740 void
741 copyWithMask(const Data& other,
742 const Data& mask);
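  /* Usage sketch (illustrative): overwrite d by 'replacement' wherever x is positive,
     using wherePositive() (declared below) to build the mask.

         escript::Data mask = x.wherePositive();   // 1 where x>0, 0 elsewhere
         d.copyWithMask(replacement, mask);
  */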
743
744 /**
745 Data object operation methods and operators.
746 */
747
748 /**
749 \brief
750 set all values to zero
751 *
752 */
753 ESCRIPT_DLL_API
754 void
755 setToZero();
756
757 /**
758 \brief
759 Interpolates this onto the given functionspace and returns
760 the result as a Data object.
761 *
762 */
763 ESCRIPT_DLL_API
764 Data
765 interpolate(const FunctionSpace& functionspace) const;
766 /**
767 \brief
768 Calculates the gradient of the data at the data points of functionspace.
769 If functionspace is not present the function space of Function(getDomain()) is used.
770 *
771 */
772 ESCRIPT_DLL_API
773 Data
774 gradOn(const FunctionSpace& functionspace) const;
775
776 ESCRIPT_DLL_API
777 Data
778 grad() const;
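  /* Usage sketch (illustrative). Assumes u is a Data object defined on some domain and
     fs is another FunctionSpace on the same domain.

         escript::Data g  = u.grad();            // gradient on Function(getDomain())
         escript::Data gf = u.gradOn(fs);        // gradient evaluated on fs
         escript::Data ui = u.interpolate(fs);   // interpolated copy of u on fs
  */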
779
780 /**
781 \brief
782 Calculate the integral over the function space domain as a python tuple.
783 */
784 ESCRIPT_DLL_API
785 boost::python::object
786 integrateToTuple_const() const;
787
788
789 /**
790 \brief
791 Calculate the integral over the function space domain as a python tuple.
792 */
793 ESCRIPT_DLL_API
794 boost::python::object
795 integrateToTuple();
796
797
798
799 /**
800 \brief
801 Return the reciprocal (1./value) of each data point of this Data object.
802 *
803 */
804 ESCRIPT_DLL_API
805 Data
806 oneOver() const;
807 /**
808 \brief
809 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
810 *
811 */
812 ESCRIPT_DLL_API
813 Data
814 wherePositive() const;
815
816 /**
817 \brief
818 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
819 *
820 */
821 ESCRIPT_DLL_API
822 Data
823 whereNegative() const;
824
825 /**
826 \brief
827 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
828 *
829 */
830 ESCRIPT_DLL_API
831 Data
832 whereNonNegative() const;
833
834 /**
835 \brief
836 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
837 *
838 */
839 ESCRIPT_DLL_API
840 Data
841 whereNonPositive() const;
842
843 /**
844 \brief
845 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
846 *
847 */
848 ESCRIPT_DLL_API
849 Data
850 whereZero(double tol=0.0) const;
851
852 /**
853 \brief
854 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
855 *
856 */
857 ESCRIPT_DLL_API
858 Data
859 whereNonZero(double tol=0.0) const;
860
861 /**
862 \brief
863 Return the maximum absolute value of this Data object.
864
865 The method is not const because lazy data needs to be expanded before Lsup can be computed.
866 The _const form can be used when the Data object is const, however this will only work for
867 Data which is not Lazy.
868
869 For Data which contain no samples (or tagged Data for which no tags in use have a value)
870 zero is returned.
871 */
872 ESCRIPT_DLL_API
873 double
874 Lsup();
875
876 ESCRIPT_DLL_API
877 double
878 Lsup_const() const;
879
880
881 /**
882 \brief
883 Return the maximum value of this Data object.
884
885 The method is not const because lazy data needs to be expanded before sup can be computed.
886 The _const form can be used when the Data object is const, however this will only work for
887 Data which is not Lazy.
888
889 For Data which contain no samples (or tagged Data for which no tags in use have a value)
890 a large negative value is returned.
891 */
892 ESCRIPT_DLL_API
893 double
894 sup();
895
896 ESCRIPT_DLL_API
897 double
898 sup_const() const;
899
900
901 /**
902 \brief
903 Return the minimum value of this Data object.
904
905 The method is not const because lazy data needs to be expanded before inf can be computed.
906 The _const form can be used when the Data object is const, however this will only work for
907 Data which is not Lazy.
908
909 For Data which contain no samples (or tagged Data for which no tags in use have a value)
910 a large positive value is returned.
911 */
912 ESCRIPT_DLL_API
913 double
914 inf();
915
916 ESCRIPT_DLL_API
917 double
918 inf_const() const;
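  /* Usage sketch (illustrative):

         double m  = d.Lsup();   // may resolve lazy data, hence the non-const form
         double hi = d.sup();
         double lo = d.inf();
         // For a const, non-lazy Data object the *_const variants can be used instead:
         // double m2 = d.Lsup_const();
  */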
919
920
921
922 /**
923 \brief
924 Return the absolute value of each data point of this Data object.
925 *
926 */
927 ESCRIPT_DLL_API
928 Data
929 abs() const;
930
931 /**
932 \brief
933 Return the maximum value of each data point of this Data object.
934 *
935 */
936 ESCRIPT_DLL_API
937 Data
938 maxval() const;
939
940 /**
941 \brief
942 Return the minimum value of each data point of this Data object.
943 *
944 */
945 ESCRIPT_DLL_API
946 Data
947 minval() const;
948
949 /**
950 \brief
951 Return the (sample number, data-point number) of the data point with
952 the minimum component value in this Data object.
953 \note If you are working in python, please consider using Locator
954 instead of manually manipulating process and point IDs.
955 */
956 ESCRIPT_DLL_API
957 const boost::python::tuple
958 minGlobalDataPoint() const;
959
960 /**
961 \brief
962 Return the (sample number, data-point number) of the data point with
963 the minimum component value in this Data object.
964 \note If you are working in python, please consider using Locator
965 instead of manually manipulating process and point IDs.
966 */
967 ESCRIPT_DLL_API
968 const boost::python::tuple
969 maxGlobalDataPoint() const;
970
971
972
973 /**
974 \brief
975 Return the sign of each data point of this Data object.
976 -1 for negative values, zero for zero values, 1 for positive values.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 sign() const;
982
983 /**
984 \brief
985 Return the symmetric part of a matrix, which is (M + transpose(M))/2.
986 *
987 */
988 ESCRIPT_DLL_API
989 Data
990 symmetric() const;
991
992 /**
993 \brief
994 Return the nonsymmetric part of a matrix, which is (M - transpose(M))/2.
995 *
996 */
997 ESCRIPT_DLL_API
998 Data
999 nonsymmetric() const;
1000
1001 /**
1002 \brief
1003 Return the trace of a matrix
1004 *
1005 */
1006 ESCRIPT_DLL_API
1007 Data
1008 trace(int axis_offset) const;
1009
1010 /**
1011 \brief
1012 Transpose each data point of this Data object around the given axis.
1013 *
1014 */
1015 ESCRIPT_DLL_API
1016 Data
1017 transpose(int axis_offset) const;
1018
1019 /**
1020 \brief
1021 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1022 Currently this function is restricted to rank 2, square shape, and dimension 3.
1023 *
1024 */
1025 ESCRIPT_DLL_API
1026 Data
1027 eigenvalues() const;
1028
1029 /**
1030 \brief
1031 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1032 The eigenvalues are ordered by increasing size, where eigenvalues with relative difference less than
1033 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1034 first non-zero entry is positive.
1035 Currently this function is restricted to rank 2, square shape, and dimension 3.
1036 *
1037 */
1038 ESCRIPT_DLL_API
1039 const boost::python::tuple
1040 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1041
1042 /**
1043 \brief
1044 swaps the components axis0 and axis1
1045 *
1046 */
1047 ESCRIPT_DLL_API
1048 Data
1049 swapaxes(const int axis0, const int axis1) const;
1050
1051 /**
1052 \brief
1053 Return the error function erf of each data point of this Data object.
1054 *
1055 */
1056 ESCRIPT_DLL_API
1057 Data
1058 erf() const;
1059
1060 /**
1061 \brief
1062 Return the sin of each data point of this Data object.
1063 *
1064 */
1065 ESCRIPT_DLL_API
1066 Data
1067 sin() const;
1068
1069 /**
1070 \brief
1071 Return the cos of each data point of this Data object.
1072 *
1073 */
1074 ESCRIPT_DLL_API
1075 Data
1076 cos() const;
1077
1078 /**
1079 \brief
1080 Return the tan of each data point of this Data object.
1081 *
1082 */
1083 ESCRIPT_DLL_API
1084 Data
1085 tan() const;
1086
1087 /**
1088 \brief
1089 Return the asin of each data point of this Data object.
1090 *
1091 */
1092 ESCRIPT_DLL_API
1093 Data
1094 asin() const;
1095
1096 /**
1097 \brief
1098 Return the acos of each data point of this Data object.
1099 *
1100 */
1101 ESCRIPT_DLL_API
1102 Data
1103 acos() const;
1104
1105 /**
1106 \brief
1107 Return the atan of each data point of this Data object.
1108 *
1109 */
1110 ESCRIPT_DLL_API
1111 Data
1112 atan() const;
1113
1114 /**
1115 \brief
1116 Return the sinh of each data point of this Data object.
1117 *
1118 */
1119 ESCRIPT_DLL_API
1120 Data
1121 sinh() const;
1122
1123 /**
1124 \brief
1125 Return the cosh of each data point of this Data object.
1126 *
1127 */
1128 ESCRIPT_DLL_API
1129 Data
1130 cosh() const;
1131
1132 /**
1133 \brief
1134 Return the tanh of each data point of this Data object.
1135 *
1136 */
1137 ESCRIPT_DLL_API
1138 Data
1139 tanh() const;
1140
1141 /**
1142 \brief
1143 Return the asinh of each data point of this Data object.
1144 *
1145 */
1146 ESCRIPT_DLL_API
1147 Data
1148 asinh() const;
1149
1150 /**
1151 \brief
1152 Return the acosh of each data point of this Data object.
1153 *
1154 */
1155 ESCRIPT_DLL_API
1156 Data
1157 acosh() const;
1158
1159 /**
1160 \brief
1161 Return the atanh of each data point of this Data object.
1162 *
1163 */
1164 ESCRIPT_DLL_API
1165 Data
1166 atanh() const;
1167
1168 /**
1169 \brief
1170 Return the log to base 10 of each data point of this Data object.
1171 *
1172 */
1173 ESCRIPT_DLL_API
1174 Data
1175 log10() const;
1176
1177 /**
1178 \brief
1179 Return the natural log of each data point of this Data object.
1180 *
1181 */
1182 ESCRIPT_DLL_API
1183 Data
1184 log() const;
1185
1186 /**
1187 \brief
1188 Return the exponential function of each data point of this Data object.
1189 *
1190 */
1191 ESCRIPT_DLL_API
1192 Data
1193 exp() const;
1194
1195 /**
1196 \brief
1197 Return the square root of each data point of this Data object.
1198 *
1199 */
1200 ESCRIPT_DLL_API
1201 Data
1202 sqrt() const;
1203
1204 /**
1205 \brief
1206 Return the negation of each data point of this Data object.
1207 *
1208 */
1209 ESCRIPT_DLL_API
1210 Data
1211 neg() const;
1212
1213 /**
1214 \brief
1215 Return the identity of each data point of this Data object.
1216 Simply returns this object unmodified.
1217 *
1218 */
1219 ESCRIPT_DLL_API
1220 Data
1221 pos() const;
1222
1223 /**
1224 \brief
1225 Return each data point of this Data object raised to the given power.
1226
1227 \param right Input - the power to raise the object to.
1228 *
1229 */
1230 ESCRIPT_DLL_API
1231 Data
1232 powD(const Data& right) const;
1233
1234 /**
1235 \brief
1236 Return each data point of this Data object raised to the power given by a boost python object.
1237
1238 \param right Input - the power to raise the object to.
1239 *
1240 */
1241 ESCRIPT_DLL_API
1242 Data
1243 powO(const boost::python::object& right) const;
1244
1245 /**
1246 \brief
1247 Return the given base (a boost python object) raised to the power of each data point of this Data object.
1248
1249 \param left Input - the base value(s)
1250 *
1251 */
1252
1253 ESCRIPT_DLL_API
1254 Data
1255 rpowO(const boost::python::object& left) const;
1256
1257 /**
1258 \brief
1259 writes the object to a file in the DX file format
1260 */
1261 ESCRIPT_DLL_API
1262 void
1263 saveDX(std::string fileName) const;
1264
1265 /**
1266 \brief
1267 writes the object to a file in the VTK file format
1268 */
1269 ESCRIPT_DLL_API
1270 void
1271 saveVTK(std::string fileName) const;
1272
1273 /**
1274 \brief
1275 Overloaded operator +=
1276 \param right - Input - The right hand side.
1277 *
1278 */
1279 ESCRIPT_DLL_API
1280 Data& operator+=(const Data& right);
1281 ESCRIPT_DLL_API
1282 Data& operator+=(const boost::python::object& right);
1283
1284 ESCRIPT_DLL_API
1285 Data& operator=(const Data& other);
1286
1287 /**
1288 \brief
1289 Overloaded operator -=
1290 \param right - Input - The right hand side.
1291 *
1292 */
1293 ESCRIPT_DLL_API
1294 Data& operator-=(const Data& right);
1295 ESCRIPT_DLL_API
1296 Data& operator-=(const boost::python::object& right);
1297
1298 /**
1299 \brief
1300 Overloaded operator *=
1301 \param right - Input - The right hand side.
1302 *
1303 */
1304 ESCRIPT_DLL_API
1305 Data& operator*=(const Data& right);
1306 ESCRIPT_DLL_API
1307 Data& operator*=(const boost::python::object& right);
1308
1309 /**
1310 \brief
1311 Overloaded operator /=
1312 \param right - Input - The right hand side.
1313 *
1314 */
1315 ESCRIPT_DLL_API
1316 Data& operator/=(const Data& right);
1317 ESCRIPT_DLL_API
1318 Data& operator/=(const boost::python::object& right);
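  /* Usage sketch (illustrative). The in-place forms may change the storage type of the
     left hand side (e.g. constant data is expanded to match an expanded right hand side).

         u += v;                                   // element-wise, in place
         escript::Data w = (u * v) / (u + v);      // element-wise, producing new Data objects
  */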
1319
1320 /**
1321 \brief
1322 Returns true if this can be interpolated to functionspace.
1323 */
1324 ESCRIPT_DLL_API
1325 bool
1326 probeInterpolation(const FunctionSpace& functionspace) const;
1327
1328 /**
1329 Data object slicing methods.
1330 */
1331
1332 /**
1333 \brief
1334 Returns a slice from this Data object.
1335
1336
1337 Implements the [] get operator in python.
1338 Calls getSlice.
1339
1340 \param key - Input - python slice tuple specifying
1341 slice to return.
1342 */
1343 ESCRIPT_DLL_API
1344 Data
1345 getItem(const boost::python::object& key) const;
1346
1347 /**
1348 \brief
1349 Copies slice from value into this Data object.
1350
1351 Implements the [] set operator in python.
1352 Calls setSlice.
1353
1354 \param key - Input - python slice tuple specifying
1355 slice to copy from value.
1356 \param value - Input - Data object to copy from.
1357 */
1358 ESCRIPT_DLL_API
1359 void
1360 setItemD(const boost::python::object& key,
1361 const Data& value);
1362
1363 ESCRIPT_DLL_API
1364 void
1365 setItemO(const boost::python::object& key,
1366 const boost::python::object& value);
1367
1368 // These following public methods should be treated as private.
1369
1370 /**
1371 \brief
1372 Perform the given unary operation on every element of every data point in
1373 this Data object.
1374 */
1375 template <class UnaryFunction>
1376 ESCRIPT_DLL_API
1377 inline
1378 void
1379 unaryOp2(UnaryFunction operation);
1380
1381 /**
1382 \brief
1383 Return a Data object containing the specified slice of
1384 this Data object.
1385 \param region - Input - Region to copy.
1386 *
1387 */
1388 ESCRIPT_DLL_API
1389 Data
1390 getSlice(const DataTypes::RegionType& region) const;
1391
1392 /**
1393 \brief
1394 Copy the specified slice from the given value into this
1395 Data object.
1396 \param value - Input - Data to copy from.
1397 \param region - Input - Region to copy.
1398 *
1399 */
1400 ESCRIPT_DLL_API
1401 void
1402 setSlice(const Data& value,
1403 const DataTypes::RegionType& region);
1404
1405 /**
1406 \brief
1407 print the data values to stdout. Used for debugging
1408 */
1409 ESCRIPT_DLL_API
1410 void
1411 print(void);
1412
1413 /**
1414 \brief
1415 return the MPI rank number of the local data
1416 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1417 is returned
1418 */
1419 ESCRIPT_DLL_API
1420 int
1421 get_MPIRank(void) const;
1422
1423 /**
1424 \brief
1425 return the number of MPI processes for the local data
1426 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1427 is returned
1428 */
1429 ESCRIPT_DLL_API
1430 int
1431 get_MPISize(void) const;
1432
1433 /**
1434 \brief
1435 return the MPI communicator of the local data
1436 MPI_COMM_WORLD is assumed and returned.
1437 */
1438 ESCRIPT_DLL_API
1439 MPI_Comm
1440 get_MPIComm(void) const;
1441
1442 /**
1443 \brief
1444 return the object produced by the factory, which is a DataConstant or DataExpanded
1445 TODO Ownership of this object should be explained in doco.
1446 */
1447 ESCRIPT_DLL_API
1448 DataAbstract*
1449 borrowData(void) const;
1450
1451 ESCRIPT_DLL_API
1452 DataAbstract_ptr
1453 borrowDataPtr(void) const;
1454
1455 ESCRIPT_DLL_API
1456 DataReady_ptr
1457 borrowReadyPtr(void) const;
1458
1459
1460
1461 /**
1462 \brief
1463 Return a pointer to the beginning of the datapoint at the specified offset.
1464 TODO Eventually these should be inlined.
1465 \param i - position(offset) in the underlying datastructure
1466 */
1467
1468 ESCRIPT_DLL_API
1469 DataTypes::ValueType::const_reference
1470 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1471
1472
1473 ESCRIPT_DLL_API
1474 DataTypes::ValueType::reference
1475 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1476
1477
1478
1479 /**
1480 \brief Create a buffer for use by getSample
1481 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1482 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1483
1484 In multi-threaded sections, this needs to be called on each thread.
1485
1486 \return A BufferGroup* if Data is lazy, NULL otherwise.
1487 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1488 */
1489 ESCRIPT_DLL_API
1490 BufferGroup*
1491 allocSampleBuffer() const;
1492
1493 /**
1494 \brief Free a buffer allocated with allocSampleBuffer.
1495 \param buffer Input - pointer to the buffer to deallocate.
1496 */
1497 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
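  /* Usage sketch (illustrative): read-only traversal that also works for lazy Data.
     allocSampleBuffer() returns NULL for non-lazy Data, which getSampleDataRO() accepts.
     Assumes the sample value type is double, as used elsewhere in this header.

         BufferGroup* bg = d.allocSampleBuffer();
         for (int s = 0; s < d.getNumSamples(); ++s) {
             const double* p = d.getSampleDataRO(s, bg);
             // ... read-only use of p ...
         }
         d.freeSampleBuffer(bg);
  */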
1498
1499 protected:
1500
1501 private:
1502
1503 double
1504 LsupWorker() const;
1505
1506 double
1507 supWorker() const;
1508
1509 double
1510 infWorker() const;
1511
1512 boost::python::object
1513 integrateWorker() const;
1514
1515 void
1516 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1517
1518 void
1519 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1520
1521
1522 /**
1523 \brief
1524 Check *this and the right operand are compatible. Throws
1525 an exception if they aren't.
1526 \param right - Input - The right hand side.
1527 */
1528 inline
1529 void
1530 operandCheck(const Data& right) const
1531 {
1532 return m_data->operandCheck(*(right.m_data.get()));
1533 }
1534
1535 /**
1536 \brief
1537 Perform the specified reduction algorithm on every element of every data point in
1538 this Data object according to the given function and return the single value result.
1539 */
1540 template <class BinaryFunction>
1541 inline
1542 double
1543 algorithm(BinaryFunction operation,
1544 double initial_value) const;
1545
1546 /**
1547 \brief
1548 Reduce each data-point in this Data object using the given operation. Return a Data
1549 object with the same number of data-points, but with each data-point containing only
1550 one value - the result of the reduction operation on the corresponding data-point in
1551 this Data object
1552 */
1553 template <class BinaryFunction>
1554 inline
1555 Data
1556 dp_algorithm(BinaryFunction operation,
1557 double initial_value) const;
1558
1559 /**
1560 \brief
1561 Perform the given binary operation on all of the data's elements.
1562 The underlying type of the right hand side (right) determines the final
1563 type of *this after the operation. For example if the right hand side
1564 is expanded *this will be expanded if necessary.
1565 RHS is a Data object.
1566 */
1567 template <class BinaryFunction>
1568 inline
1569 void
1570 binaryOp(const Data& right,
1571 BinaryFunction operation);
1572
1573 /**
1574 \brief
1575 Convert the data type of the RHS to match this.
1576 \param right - Input - data type to match.
1577 */
1578 void
1579 typeMatchLeft(Data& right) const;
1580
1581 /**
1582 \brief
1583 Convert the data type of this to match the RHS.
1584 \param right - Input - data type to match.
1585 */
1586 void
1587 typeMatchRight(const Data& right);
1588
1589 /**
1590 \brief
1591 Construct a Data object of the appropriate type.
1592 */
1593
1594 void
1595 initialise(const DataTypes::ValueType& value,
1596 const DataTypes::ShapeType& shape,
1597 const FunctionSpace& what,
1598 bool expanded);
1599
1600 void
1601 initialise(const WrappedArray& value,
1602 const FunctionSpace& what,
1603 bool expanded);
1604
1605 //
1606 // flag to protect the data object against any update
1607 bool m_protected;
1608 mutable bool m_shared;
1609 bool m_lazy;
1610
1611 //
1612 // pointer to the actual data object
1613 // boost::shared_ptr<DataAbstract> m_data;
1614 DataAbstract_ptr m_data;
1615
1616 // If possible please use getReadyPtr instead.
1617 // But see warning below.
1618 const DataReady*
1619 getReady() const;
1620
1621 DataReady*
1622 getReady();
1623
1624
1625 // Be wary of using this for local operations since it (temporarily) increases reference count.
1626 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1627 // getReady() instead
1628 DataReady_ptr
1629 getReadyPtr();
1630
1631 const_DataReady_ptr
1632 getReadyPtr() const;
1633
1634
1635 /**
1636 \brief Update the Data's shared flag
1637 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1638 For internal use only.
1639 */
1640 void updateShareStatus(bool nowshared) const
1641 {
1642 m_shared=nowshared; // m_shared is mutable
1643 }
1644
1645 // In the isShared() method below:
1646 // A problem would occur if m_data (the address pointed to) were being modified
1647 // while the call m_data->is_shared is being executed.
1648 //
1649 // Q: So why do I think this code can be thread safe/correct?
1650 // A: We need to make some assumptions.
1651 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1652 // 2. We assume that no constructions or assignments which will share previously unshared data
1653 // will occur while this call is executing. This is consistent with the way Data and the C wrapper are written.
1654 //
1655 // This means that the only transition we need to consider, is when a previously shared object is
1656 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1657 // In those cases the m_shared flag changes to false after m_data has completed changing.
1658 // For any threads executing before the flag switches they will assume the object is still shared.
1659 bool isShared() const
1660 {
1661 return m_shared;
1662 /* if (m_shared) return true;
1663 if (m_data->isShared())
1664 {
1665 updateShareStatus(true);
1666 return true;
1667 }
1668 return false;*/
1669 }
1670
1671 void forceResolve()
1672 {
1673 if (isLazy())
1674 {
1675 #ifdef _OPENMP
1676 if (omp_in_parallel())
1677 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1678 throw DataException("Please do not call forceResolve() in a parallel region.");
1679 }
1680 #endif
1681 resolve();
1682 }
1683 }
1684
1685 /**
1686 \brief if another object is sharing our member data, make a copy to work with instead.
1687 This code should only be called from single threaded sections of code.
1688 */
1689 void exclusiveWrite()
1690 {
1691 #ifdef _OPENMP
1692 if (omp_in_parallel())
1693 {
1694 // *((int*)0)=17;
1695 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1696 }
1697 #endif
1698 forceResolve();
1699 if (isShared())
1700 {
1701 DataAbstract* t=m_data->deepCopy();
1702 set_m_data(DataAbstract_ptr(t));
1703 }
1704 }
1705
1706 /**
1707 \brief checks if caller can have exclusive write to the object
1708 */
1709 void checkExclusiveWrite()
1710 {
1711 if (isLazy() || isShared())
1712 {
1713 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1714 }
1715 }
1716
1717 /**
1718 \brief Modify the data abstract hosted by this Data object
1719 For internal use only.
1720 Passing a pointer to null is permitted (do this in the destructor)
1721 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1722 */
1723 void set_m_data(DataAbstract_ptr p);
1724
1725 friend class DataAbstract; // To allow calls to updateShareStatus
1726
1727 };
1728
1729 } // end namespace escript
1730
1731
1732 // No, this is not supposed to be at the top of the file
1733 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1734 // so that I can dynamic cast between them below.
1735 #include "DataReady.h"
1736 #include "DataLazy.h"
1737
1738 namespace escript
1739 {
1740
1741 inline
1742 const DataReady*
1743 Data::getReady() const
1744 {
1745 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1746 EsysAssert((dr!=0), "Error - casting to DataReady.");
1747 return dr;
1748 }
1749
1750 inline
1751 DataReady*
1752 Data::getReady()
1753 {
1754 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1755 EsysAssert((dr!=0), "Error - casting to DataReady.");
1756 return dr;
1757 }
1758
1759 // Be wary of using this for local operations since it (temporarily) increases reference count.
1760 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1761 // getReady() instead
1762 inline
1763 DataReady_ptr
1764 Data::getReadyPtr()
1765 {
1766 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1767 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1768 return dr;
1769 }
1770
1771
1772 inline
1773 const_DataReady_ptr
1774 Data::getReadyPtr() const
1775 {
1776 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1777 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1778 return dr;
1779 }
1780
1781 inline
1782 DataAbstract::ValueType::value_type*
1783 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1784 {
1785 if (isLazy())
1786 {
1787 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1788 }
1789 return getReady()->getSampleDataRW(sampleNo);
1790 }
1791
1792 inline
1793 const DataAbstract::ValueType::value_type*
1794 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1795 {
1796 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1797 if (l!=0)
1798 {
1799 size_t offset=0;
1800 if (bufferg==NULL)
1801 {
1802 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1803 }
1804 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1805 return &((*res)[offset]);
1806 }
1807 return getReady()->getSampleDataRO(sampleNo);
1808 }
1809
1810
1811
1812 /**
1813 Modify a filename for MPI parallel output to multiple files
1814 */
1815 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1816
1817 /**
1818 Binary Data object operators.
1819 */
1820 inline double rpow(double x,double y)
1821 {
1822 return pow(y,x);
1823 }
1824
1825 /**
1826 \brief
1827 Operator+
1828 Takes two Data objects.
1829 */
1830 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1831
1832 /**
1833 \brief
1834 Operator-
1835 Takes two Data objects.
1836 */
1837 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1838
1839 /**
1840 \brief
1841 Operator*
1842 Takes two Data objects.
1843 */
1844 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1845
1846 /**
1847 \brief
1848 Operator/
1849 Takes two Data objects.
1850 */
1851 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1852
1853 /**
1854 \brief
1855 Operator+
1856 Takes LHS Data object and RHS python::object.
1857 python::object must be convertible to Data type.
1858 */
1859 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1860
1861 /**
1862 \brief
1863 Operator-
1864 Takes LHS Data object and RHS python::object.
1865 python::object must be convertible to Data type.
1866 */
1867 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1868
1869 /**
1870 \brief
1871 Operator*
1872 Takes LHS Data object and RHS python::object.
1873 python::object must be convertible to Data type.
1874 */
1875 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1876
1877 /**
1878 \brief
1879 Operator/
1880 Takes LHS Data object and RHS python::object.
1881 python::object must be convertible to Data type.
1882 */
1883 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1884
1885 /**
1886 \brief
1887 Operator+
1888 Takes LHS python::object and RHS Data object.
1889 python::object must be convertible to Data type.
1890 */
1891 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1892
1893 /**
1894 \brief
1895 Operator-
1896 Takes LHS python::object and RHS Data object.
1897 python::object must be convertible to Data type.
1898 */
1899 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1900
1901 /**
1902 \brief
1903 Operator*
1904 Takes LHS python::object and RHS Data object.
1905 python::object must be convertible to Data type.
1906 */
1907 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1908
1909 /**
1910 \brief
1911 Operator/
1912 Takes LHS python::object and RHS Data object.
1913 python::object must be convertible to Data type.
1914 */
1915 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1916
1917
1918
1919 /**
1920 \brief
1921 Output operator
1922 */
1923 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1924
1925 /**
1926 \brief
1927 Compute a tensor product of two Data objects
1928 \param arg_0 - Input - Data object
1929 \param arg_1 - Input - Data object
1930 \param axis_offset - Input - axis offset
1931 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1932 */
1933 ESCRIPT_DLL_API
1934 Data
1935 C_GeneralTensorProduct(Data& arg_0,
1936 Data& arg_1,
1937 int axis_offset=0,
1938 int transpose=0);
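/* Usage sketch (illustrative). If axis_offset gives the number of contracted axes (an
   assumption based on the parameter description above), a per-data-point matrix-vector
   product would look like:

       escript::Data Av = escript::C_GeneralTensorProduct(A, v, 1, 0);  // contract one axis, no transposes
*/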
1939
1940 /**
1941 \brief
1942 Perform the given binary operation with this and right as operands.
1943 Right is a Data object.
1944 */
1945 template <class BinaryFunction>
1946 inline
1947 void
1948 Data::binaryOp(const Data& right,
1949 BinaryFunction operation)
1950 {
1951 //
1952 // a rank zero object cannot be updated in place with an object of higher rank
1953 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1954 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1955 }
1956
1957 if (isLazy() || right.isLazy())
1958 {
1959 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1960 }
1961 //
1962 // initially make the temporary a shallow copy
1963 Data tempRight(right);
1964
1965 if (getFunctionSpace()!=right.getFunctionSpace()) {
1966 if (right.probeInterpolation(getFunctionSpace())) {
1967 //
1968 // an interpolation is required so create a new Data
1969 tempRight=Data(right,this->getFunctionSpace());
1970 } else if (probeInterpolation(right.getFunctionSpace())) {
1971 //
1972 // interpolate onto the RHS function space
1973 Data tempLeft(*this,right.getFunctionSpace());
1974 // m_data=tempLeft.m_data;
1975 set_m_data(tempLeft.m_data);
1976 }
1977 }
1978 operandCheck(tempRight);
1979 //
1980 // ensure this has the right type for the RHS
1981 typeMatchRight(tempRight);
1982 //
1983 // Need to cast to the concrete types so that the correct binaryOp
1984 // is called.
1985 if (isExpanded()) {
1986 //
1987 // Expanded data will be done in parallel, the right hand side can be
1988 // of any data type
1989 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
1990 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
1991 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
1992 } else if (isTagged()) {
1993 //
1994 // Tagged data is operated on serially, the right hand side can be
1995 // either DataConstant or DataTagged
1996 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
1997 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
1998 if (right.isTagged()) {
1999 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2000 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2001 escript::binaryOp(*leftC,*rightC,operation);
2002 } else {
2003 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2004 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2005 escript::binaryOp(*leftC,*rightC,operation);
2006 }
2007 } else if (isConstant()) {
2008 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2009 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2010 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2011 escript::binaryOp(*leftC,*rightC,operation);
2012 }
2013 }
2014
2015 /**
2016 \brief
2017 Perform the given Data object reduction algorithm on this and return the result.
2018 Given operation combines each element of each data point, thus argument
2019 object (*this) is a rank n Data object, and returned object is a scalar.
2020 Calls escript::algorithm.
2021 */
2022 template <class BinaryFunction>
2023 inline
2024 double
2025 Data::algorithm(BinaryFunction operation, double initial_value) const
2026 {
2027 if (isExpanded()) {
2028 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2029 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2030 return escript::algorithm(*leftC,operation,initial_value);
2031 } else if (isTagged()) {
2032 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2033 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2034 return escript::algorithm(*leftC,operation,initial_value);
2035 } else if (isConstant()) {
2036 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2037 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2038 return escript::algorithm(*leftC,operation,initial_value);
2039 } else if (isEmpty()) {
2040 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2041 } else if (isLazy()) {
2042 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2043 } else {
2044 throw DataException("Error - Data encapsulates an unknown type.");
2045 }
2046 }
2047
2048 /**
2049 \brief
2050 Perform the given data point reduction algorithm on data and return the result.
2051 Given operation combines each element within each data point into a scalar,
2052 thus argument object is a rank n Data object, and returned object is a
2053 rank 0 Data object.
2054 Calls escript::dp_algorithm.
2055 */
2056 template <class BinaryFunction>
2057 inline
2058 Data
2059 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2060 {
2061 if (isEmpty()) {
2062 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2063 }
2064 else if (isExpanded()) {
2065 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2066 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2067 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2068 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2069 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2070 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2071 return result;
2072 }
2073 else if (isTagged()) {
2074 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2075 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2076 DataTypes::ValueType defval(1);
2077 defval[0]=0;
2078 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2079 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2080 return Data(resultT); // note: the Data object now owns the resultT pointer
2081 }
2082 else if (isConstant()) {
2083 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2084 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2085 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2086 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2087 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2088 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2089 return result;
2090 } else if (isLazy()) {
2091 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2092 } else {
2093 throw DataException("Error - Data encapsulates an unknown type.");
2094 }
2095 }
2096
2097 /**
2098 \brief
2099 Compute a tensor operation with two Data objects
2100 \param arg_0 - Input - Data object
2101 \param arg_1 - Input - Data object
2102 \param operation - Input - Binary op functor
2103 */
2104 template <typename BinaryFunction>
2105 inline
2106 Data
2107 C_TensorBinaryOperation(Data const &arg_0,
2108 Data const &arg_1,
2109 BinaryFunction operation)
2110 {
2111 if (arg_0.isEmpty() || arg_1.isEmpty())
2112 {
2113 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2114 }
2115 if (arg_0.isLazy() || arg_1.isLazy())
2116 {
2117 throw DataException("Error - Operations not permitted on lazy data.");
2118 }
2119 // Interpolate if necessary and find an appropriate function space
2120 Data arg_0_Z, arg_1_Z;
2121 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2122 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2123 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2124 arg_1_Z = Data(arg_1);
2125 }
2126 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2127 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2128 arg_0_Z =Data(arg_0);
2129 }
2130 else {
2131 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2132 }
2133 } else {
2134 arg_0_Z = Data(arg_0);
2135 arg_1_Z = Data(arg_1);
2136 }
2137 // Get rank and shape of inputs
2138 int rank0 = arg_0_Z.getDataPointRank();
2139 int rank1 = arg_1_Z.getDataPointRank();
2140 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2141 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2142 int size0 = arg_0_Z.getDataPointSize();
2143 int size1 = arg_1_Z.getDataPointSize();
2144 // Declare output Data object
2145 Data res;
2146
2147 if (shape0 == shape1) {
2148 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2149 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2150 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2151 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2152 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2153
2154 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2155 }
2156 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2157
2158 // Prepare the DataConstant input
2159 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2160
2161 // Borrow DataTagged input from Data object
2162 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2163
2164 // Prepare a DataTagged output 2
2165 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2166 res.tag();
2167 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2168
2169 // Prepare offset into DataConstant
2170 int offset_0 = tmp_0->getPointOffset(0,0);
2171 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2172
2173 // Get the pointers to the actual data
2174 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2175 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2176
2177 // Compute a result for the default
2178 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2179 // Compute a result for each tag
2180 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2181 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2182 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2183 tmp_2->addTag(i->first);
2184 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2185 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2186
2187 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2188 }
2189
2190 }
2191 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2192 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2193 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2194 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2195 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2196
2197 int sampleNo_1,dataPointNo_1;
2198 int numSamples_1 = arg_1_Z.getNumSamples();
2199 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2200 int offset_0 = tmp_0->getPointOffset(0,0);
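      // Ensure res is safe to write into before raw pointers into its storage
      // are handed out inside the OpenMP loop below.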
2201 res.requireWrite();
2202 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2203 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2204 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2205 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2206 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2207 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2208 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2209 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2210 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2211 }
2212 }
2213
2214 }
2215 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2216 // Borrow DataTagged input from Data object
2217 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2218
2219 // Prepare the DataConstant input
2220 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2221
2222 // Prepare a DataTagged output 2
2223 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2224 res.tag();
2225 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2226
2227 // Prepare offset into DataConstant
2228 int offset_1 = tmp_1->getPointOffset(0,0);
2229
2230 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2231 // Get the pointers to the actual data
2232 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2233 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2234 // Compute a result for the default
2235 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2236 // Compute a result for each tag
2237 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2238 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2239 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2240 tmp_2->addTag(i->first);
2241 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2242 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2243 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2244 }
2245
2246 }
2247 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2248 // Borrow DataTagged input from Data object
2249 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2250
2251 // Borrow DataTagged input from Data object
2252 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2253
2254 // Prepare a DataTagged output 2
2255 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2256 res.tag(); // DataTagged output
2257 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2258
2259 // Get the pointers to the actual data
2260 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2261 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2262 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2263
2264 // Compute a result for the default
2265 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2266 // Merge the tags
2267 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2268 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2269 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2270 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2271 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2272 }
2273 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2274 tmp_2->addTag(i->first);
2275 }
2276 // Compute a result for each tag
2277 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2278 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2279
2280 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2281 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2282 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2283
2284 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2285 }
2286
2287 }
2288 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2289 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2290 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2291 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2292 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2293 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2294
2295 int sampleNo_0,dataPointNo_0;
2296 int numSamples_0 = arg_0_Z.getNumSamples();
2297 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2298 res.requireWrite();
2299 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2300 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2301 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2302 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2303 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2304 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2305 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2306 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2307 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2308 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2309 }
2310 }
2311
2312 }
2313 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2314 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2315 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2316 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2317 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2318
2319 int sampleNo_0,dataPointNo_0;
2320 int numSamples_0 = arg_0_Z.getNumSamples();
2321 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2322 int offset_1 = tmp_1->getPointOffset(0,0);
2323 res.requireWrite();
2324 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2325 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2326 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2327 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2328 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2329
2330 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2331 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2332 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2333
2334
2335 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2336 }
2337 }
2338
2339 }
2340 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2341 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2342 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2343 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2344 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2345 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2346
2347 int sampleNo_0,dataPointNo_0;
2348 int numSamples_0 = arg_0_Z.getNumSamples();
2349 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2350 res.requireWrite();
2351 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2352 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2353 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2354 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2355 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2356 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2357 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2358 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2359 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2360 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2361 }
2362 }
2363
2364 }
2365 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2366 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2367 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2368 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2369 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2370 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2371
2372 int sampleNo_0,dataPointNo_0;
2373 int numSamples_0 = arg_0_Z.getNumSamples();
2374 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2375 res.requireWrite();
2376 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2377 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2378 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2379 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2380 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2381 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2382 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2383 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2384 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2385 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2386 }
2387 }
2388
2389 }
2390 else {
2391 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2392 }
2393
2394 } else if (0 == rank0) {
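    // Here arg_0 is a scalar (rank 0): its single value is broadcast against
    // every component of arg_1, so the result takes shape1.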
2395 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2396 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2397 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2398 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2399 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2400 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2401 }
2402 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2403
2404 // Prepare the DataConstant input
2405 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2406
2407 // Borrow DataTagged input from Data object
2408 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2409
2410 // Prepare a DataTagged output 2
2411 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2412 res.tag();
2413 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2414
2415 // Prepare offset into DataConstant
2416 int offset_0 = tmp_0->getPointOffset(0,0);
2417 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2418
2419 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2420 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2421
2422 // Compute a result for the default
2423 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2424 // Compute a result for each tag
2425 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2426 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2427 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2428 tmp_2->addTag(i->first);
2429 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2430 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2431 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2432 }
2433
2434 }
2435 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2436
2437 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2438 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2439 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2440 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2441
2442 int sampleNo_1,dataPointNo_1;
2443 int numSamples_1 = arg_1_Z.getNumSamples();
2444 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2445 int offset_0 = tmp_0->getPointOffset(0,0);
2446 res.requireWrite();
2447 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2448 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2449 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2450 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2451 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2452 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2453 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2454 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2455 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2456
2457 }
2458 }
2459
2460 }
2461 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2462
2463 // Borrow DataTagged input from Data object
2464 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2465
2466 // Prepare the DataConstant input
2467 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2468
2469 // Prepare a DataTagged output 2
2470 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2471 res.tag();
2472 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2473
2474 // Prepare offset into DataConstant
2475 int offset_1 = tmp_1->getPointOffset(0,0);
2476 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2477
2478 // Get the pointers to the actual data
2479 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2480 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2481
2482
2483 // Compute a result for the default
2484 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2485 // Compute a result for each tag
2486 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2487 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2488 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2489 tmp_2->addTag(i->first);
2490 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2491 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2492
2493 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2494 }
2495
2496 }
2497 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2498
2499 // Borrow DataTagged input from Data object
2500 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2501
2502 // Borrow DataTagged input from Data object
2503 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2504
2505 // Prepare a DataTagged output 2
2506 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2507 res.tag(); // DataTagged output
2508 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2509
2510 // Get the pointers to the actual data
2511 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2512 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2513 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2514
2515 // Compute a result for the default
2516 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2517 // Merge the tags
2518 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2519 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2520 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2521 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2522 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2523 }
2524 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2525 tmp_2->addTag(i->first);
2526 }
2527 // Compute a result for each tag
2528 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2529 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2530 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2531 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2532 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2533
2534 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2535 }
2536
2537 }
2538 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2539
2540 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2541 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2542 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2543 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2544 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2545
2546 int sampleNo_0,dataPointNo_0;
2547 int numSamples_0 = arg_0_Z.getNumSamples();
2548 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2549 res.requireWrite();
2550 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2551 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2552 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2553 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2554 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2555 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2556 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2557 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2558 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2559 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2560 }
2561 }
2562
2563 }
2564 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2565 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2566 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2567 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2568 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2569
2570 int sampleNo_0,dataPointNo_0;
2571 int numSamples_0 = arg_0_Z.getNumSamples();
2572 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2573 int offset_1 = tmp_1->getPointOffset(0,0);
2574 res.requireWrite();
2575 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2576 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2577 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2578 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2579 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2580 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2581 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2582 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2583 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2584 }
2585 }
2586
2587
2588 }
2589 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2590
2591 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2592 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2593 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2594 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2595 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2596
2597 int sampleNo_0,dataPointNo_0;
2598 int numSamples_0 = arg_0_Z.getNumSamples();
2599 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2600 res.requireWrite();
2601 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2602 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2603 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2604 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2605 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2606 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2607 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2608 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2609 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2610 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2611 }
2612 }
2613
2614 }
2615 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2616
2617 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2618 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2619 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2620 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2621 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2622
2623 int sampleNo_0,dataPointNo_0;
2624 int numSamples_0 = arg_0_Z.getNumSamples();
2625 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2626 res.requireWrite();
2627 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2628 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2629 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2630 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2631 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2632 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2633 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2634 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2635 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2636 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2637 }
2638 }
2639
2640 }
2641 else {
2642 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2643 }
2644
2645 } else if (0 == rank1) {
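    // Here arg_1 is a scalar (rank 0): its single value is broadcast against
    // every component of arg_0, so the result takes shape0.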
2646 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2647 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2648 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2649 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2650 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2651 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2652 }
2653 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2654
2655 // Prepare the DataConstant input
2656 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2657
2658 // Borrow DataTagged input from Data object
2659 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2660
2661 // Prepare a DataTagged output 2
2662 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2663 res.tag();
2664 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2665
2666 // Prepare offset into DataConstant
2667 int offset_0 = tmp_0->getPointOffset(0,0);
2668 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2669
2670 // Get the pointers to the actual data
2671 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2672 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2673
2674 // Compute a result for the default
2675 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2676 // Compute a result for each tag
2677 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2678 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2679 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2680 tmp_2->addTag(i->first);
2681 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2682 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2683 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2684 }
2685 }
2686 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2687
2688 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2689 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2690 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2691 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2692
2693 int sampleNo_1,dataPointNo_1;
2694 int numSamples_1 = arg_1_Z.getNumSamples();
2695 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2696 int offset_0 = tmp_0->getPointOffset(0,0);
2697 res.requireWrite();
2698 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2699 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2700 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2701 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2702 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2703 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2704 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2705 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2706 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2707 }
2708 }
2709
2710 }
2711 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2712
2713 // Borrow DataTagged input from Data object
2714 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2715
2716 // Prepare the DataConstant input
2717 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2718
2719 // Prepare a DataTagged output 2
2720 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2721 res.tag();
2722 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2723
2724 // Prepare offset into DataConstant
2725 int offset_1 = tmp_1->getPointOffset(0,0);
2726 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2727 // Get the pointers to the actual data
2728 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2729 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2730 // Compute a result for the default
2731 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2732 // Compute a result for each tag
2733 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2734 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2735 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2736 tmp_2->addTag(i->first);
2737 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2738 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2739 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2740 }
2741
2742 }
2743 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2744
2745 // Borrow DataTagged input from Data object
2746 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2747
2748 // Borrow DataTagged input from Data object
2749 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2750
2751 // Prepare a DataTagged output 2
2752 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2753 res.tag(); // DataTagged output
2754 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2755
2756 // Get the pointers to the actual data
2757 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2758 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2759 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2760
2761 // Compute a result for the default
2762 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2763 // Merge the tags
2764 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2765 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2766 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2767 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2768 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2769 }
2770 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2771 tmp_2->addTag(i->first);
2772 }
2773 // Compute a result for each tag
2774 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2775 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2776 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2777 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2778 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2779 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2780 }
2781
2782 }
2783 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2784
2785 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2786 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2787 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2788 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2789 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2790
2791 int sampleNo_0,dataPointNo_0;
2792 int numSamples_0 = arg_0_Z.getNumSamples();
2793 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2794 res.requireWrite();
2795 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2796 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2797 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2798 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2799 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2800 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2801 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2802 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2803 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2804 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2805 }
2806 }
2807
2808 }
2809 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2810 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2811 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2812 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2813 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2814
2815 int sampleNo_0,dataPointNo_0;
2816 int numSamples_0 = arg_0_Z.getNumSamples();
2817 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2818 int offset_1 = tmp_1->getPointOffset(0,0);
2819 res.requireWrite();
2820 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2821 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2822 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2823 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2824 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2825 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2826 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2827 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2828 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2829 }
2830 }
2831
2832
2833 }
2834 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2835
2836 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2837 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2838 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2839 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2840 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2841
2842 int sampleNo_0,dataPointNo_0;
2843 int numSamples_0 = arg_0_Z.getNumSamples();
2844 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2845 res.requireWrite();
2846 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2847 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2848 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2849 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2850 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2851 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2852 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2853 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2854 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2855 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2856 }
2857 }
2858
2859 }
2860 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2861
2862 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2863 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2864 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2865 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2866 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2867
2868 int sampleNo_0,dataPointNo_0;
2869 int numSamples_0 = arg_0_Z.getNumSamples();
2870 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2871 res.requireWrite();
2872 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2873 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2874 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2875 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2876 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2877 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2878 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2879 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2880 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2881 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2882 }
2883 }
2884
2885 }
2886 else {
2887 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2888 }
2889
2890 } else {
2891 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2892 }
2893
2894 return res;
2895 }
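
/*
  Illustrative sketch only (not part of the escript interface): it shows how a
  caller might combine two Data objects with C_TensorBinaryOperation.  The names
  ExamplePlus and C_TensorBinaryOperation_example are hypothetical and exist
  purely for documentation; real callers supply their own functors in the same way.
*/
struct ExamplePlus
{
  double operator()(double x, double y) const { return x + y; }
};

inline
Data
C_TensorBinaryOperation_example(Data const &a, Data const &b)
{
  // Pointwise a + b.  Interpolation to a common function space and dispatch on
  // the constant/tagged/expanded storage types happen inside
  // C_TensorBinaryOperation itself.
  return C_TensorBinaryOperation(a, b, ExamplePlus());
}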
2896
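/**
\brief
Compute a pointwise unary tensor operation on a Data object.
Empty and lazy arguments are rejected with a DataException.
\param arg_0 - Input - Data operand
\param operation - Input - unary functor applied to each data point value
*/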
2897 template <typename UnaryFunction>
2898 Data
2899 C_TensorUnaryOperation(Data const &arg_0,
2900 UnaryFunction operation)
2901 {
2902 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2903 {
2904 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2905 }
2906 if (arg_0.isLazy())
2907 {
2908 throw DataException("Error - Operations not permitted on lazy data.");
2909 }
2910 // Only one operand, so no interpolation is required; just take a copy of the input
2911 Data arg_0_Z = Data(arg_0);
2912
2913 // Get the shape and size of the input
2914 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2915 int size0 = arg_0_Z.getDataPointSize();
2916
2917 // Declare output Data object
2918 Data res;
2919
2920 if (arg_0_Z.isConstant()) {
2921 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2922 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2923 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2924 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2925 }
2926 else if (arg_0_Z.isTagged()) {
2927
2928 // Borrow DataTagged input from Data object
2929 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2930
2931 // Prepare a DataTagged output 2
2932 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2933 res.tag();
2934 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2935
2936 // Get the pointers to the actual data
2937 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2938 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2939 // Compute a result for the default
2940 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2941 // Compute a result for each tag
2942 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2943 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2944 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2945 tmp_2->addTag(i->first);
2946 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2947 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2948 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2949 }
2950
2951 }
2952 else if (arg_0_Z.isExpanded()) {
2953
2954 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2955 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2956 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2957
2958 int sampleNo_0,dataPointNo_0;
2959 int numSamples_0 = arg_0_Z.getNumSamples();
2960 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
     res.requireWrite(); // make res writable before the parallel loop, as in C_TensorBinaryOperation
2961 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2962 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2963 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2964 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2965 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2966 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2967 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2968 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2969 }
2970 }
2971 }
2972 else {
2973 throw DataException("Error - C_TensorUnaryOperation: unknown data type of input");
2974 }
2975
2976 return res;
2977 }
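
/*
  Illustrative sketch only (not part of the escript interface): applying
  C_TensorUnaryOperation with a simple functor.  The names ExampleNegate and
  C_TensorUnaryOperation_example are hypothetical; any functor with a
  double operator()(double) can be passed in the same way.
*/
struct ExampleNegate
{
  double operator()(double x) const { return -x; }
};

inline
Data
C_TensorUnaryOperation_example(Data const &a)
{
  // Negates every component of every data point of a.
  return C_TensorUnaryOperation(a, ExampleNegate());
}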
2978
2979 }
2980 #endif
