
Contents of /trunk/escript/src/Data.h



Revision 2519
Mon Jul 6 00:43:08 2009 UTC by jfenwick
File MIME type: text/plain
File size: 92125 byte(s)
Correcting some doxygen errors
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" differs from the
101 function space of inData, an attempt is made to interpolate inData onto "what";
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be treated like a python array/sequence.
165 All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
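/* Editorial usage sketch (added by the editor, not part of the original API docs).
   Illustrates delayed (lazy) evaluation with delaySelf()/resolve(); the variable
   names are hypothetical.

     escript::Data d(1.0);          // ordinary (ready) data
     d.delaySelf();                 // convert to lazy data
     escript::Data e = d + d;       // the expression is recorded, not evaluated
     e.resolve();                   // force evaluation back into ready data
*/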
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true, if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258
259 /**
260 \brief
261 Return the value of a data point as a python tuple.
262 */
263 ESCRIPT_DLL_API
264 const boost::python::object
265 getValueOfDataPointAsTuple(int dataPointNo);
266
267 /**
268 \brief
269 sets the values of a data-point from a python object on this process
270 */
271 ESCRIPT_DLL_API
272 void
273 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
274
275 /**
276 \brief
277 sets the values of a data-point from an array-like object on this process
278 */
279 ESCRIPT_DLL_API
280 void
281 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
282
283 /**
284 \brief
285 sets the values of a data-point on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPoint(int dataPointNo, const double);
290
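/* Editorial usage sketch (not part of the original header). Writing and reading
   a single data point on this process; assumes a scalar Data object "d" and a
   valid process-local point number.

     d.setValueOfDataPoint(0, 3.14);                              // set point 0
     boost::python::object v = d.getValueOfDataPointAsTuple(0);   // read it back
*/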
291 /**
292 \brief Return a data point across all processors as a python tuple.
293 */
294 ESCRIPT_DLL_API
295 const boost::python::object
296 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
297
298 /**
299 \brief
300 Return the tag number associated with the given data-point.
301
302 */
303 ESCRIPT_DLL_API
304 int
305 getTagNumber(int dpno);
306
307 /**
308 \brief
309 Return the C wrapper for the Data object.
310 */
311 ESCRIPT_DLL_API
312 escriptDataC
313 getDataC();
314
315
316
317 /**
318 \brief
319 Return the C wrapper for the Data object - const version.
320 */
321 ESCRIPT_DLL_API
322 escriptDataC
323 getDataC() const;
324
325 /**
326 \brief How much space is required to evaluate a sample of the Data.
327 */
328 ESCRIPT_DLL_API
329 size_t
330 getSampleBufferSize() const;
331
332
333
334 /**
335 \brief
336 Write the data as a string. For large amounts of data, a summary is printed.
337 */
338 ESCRIPT_DLL_API
339 std::string
340 toString() const;
341
342 /**
343 \brief
344 Whatever the current Data type make this into a DataExpanded.
345 */
346 ESCRIPT_DLL_API
347 void
348 expand();
349
350 /**
351 \brief
352 If possible convert this Data to DataTagged. This will only allow
353 Constant data to be converted to tagged. An attempt to convert
354 Expanded data to tagged will throw an exception.
355 */
356 ESCRIPT_DLL_API
357 void
358 tag();
359
360 /**
361 \brief If this data is lazy, then convert it to ready data.
362 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
363 */
364 ESCRIPT_DLL_API
365 void
366 resolve();
367
368
369 /**
370 \brief Ensures data is ready for write access.
371 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
372 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
373 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
374 Doing so might introduce additional sharing.
375 */
376 ESCRIPT_DLL_API
377 void
378 requireWrite();
379
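/* Editorial usage sketch (not part of the original header). A typical write
   pattern, assuming "d" is a Data object: call requireWrite() once in a
   single-threaded section, then fetch writable sample pointers in parallel.

     d.requireWrite();                              // resolve + un-share (serial code)
     #pragma omp parallel for
     for (int s = 0; s < d.getNumSamples(); ++s) {
       double* p = d.getSampleDataRW(s);            // writable data for sample s
       int n = d.getNumDataPointsPerSample() * d.getDataPointSize();
       for (int k = 0; k < n; ++k) {
         p[k] *= 2.0;                               // scale every value in the sample
       }
     }
*/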
380 /**
381 \brief
382 Return true if this Data is expanded.
383 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
384 */
385 ESCRIPT_DLL_API
386 bool
387 isExpanded() const;
388
389 /**
390 \brief
391 Return true if this Data is expanded or resolves to expanded.
392 That is, if it has a separate value for each datapoint in the sample.
393 */
394 ESCRIPT_DLL_API
395 bool
396 actsExpanded() const;
397
398
399 /**
400 \brief
401 Return true if this Data is tagged.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isTagged() const;
406
407 /**
408 \brief
409 Return true if this Data is constant.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isConstant() const;
414
415 /**
416 \brief Return true if this Data is lazy.
417 */
418 ESCRIPT_DLL_API
419 bool
420 isLazy() const;
421
422 /**
423 \brief Return true if this data is ready.
424 */
425 ESCRIPT_DLL_API
426 bool
427 isReady() const;
428
429 /**
430 \brief
431 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
432 contains datapoints.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isEmpty() const;
437
438 /**
439 \brief
440 Return the function space.
441 */
442 ESCRIPT_DLL_API
443 inline
444 const FunctionSpace&
445 getFunctionSpace() const
446 {
447 return m_data->getFunctionSpace();
448 }
449
450 /**
451 \brief
452 Return a copy of the function space.
453 */
454 ESCRIPT_DLL_API
455 const FunctionSpace
456 getCopyOfFunctionSpace() const;
457
458 /**
459 \brief
460 Return the domain.
461 */
462 ESCRIPT_DLL_API
463 inline
464 // const AbstractDomain&
465 const_Domain_ptr
466 getDomain() const
467 {
468 return getFunctionSpace().getDomain();
469 }
470
471
472 /**
473 \brief
474 Return the domain.
475 TODO: For internal use only. This should be removed.
476 */
477 ESCRIPT_DLL_API
478 inline
479 // const AbstractDomain&
480 Domain_ptr
481 getDomainPython() const
482 {
483 return getFunctionSpace().getDomainPython();
484 }
485
486 /**
487 \brief
488 Return a copy of the domain.
489 */
490 ESCRIPT_DLL_API
491 const AbstractDomain
492 getCopyOfDomain() const;
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541
542 /**
543 \brief
544 Return the number of values in the shape for this object.
545 */
546 ESCRIPT_DLL_API
547 int
548 getNoValues() const
549 {
550 return m_data->getNoValues();
551 }
552
553
554 /**
555 \brief
556 dumps the object into a netCDF file
557 */
558 ESCRIPT_DLL_API
559 void
560 dump(const std::string fileName) const;
561
562 /**
563 \brief returns the values of the object as a list of tuples (one for each datapoint).
564
565 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
566 If false, the result is a list of scalars [1, 2, ...]
567 */
568 ESCRIPT_DLL_API
569 const boost::python::object
570 toListOfTuples(bool scalarastuple=true);
571
572
573 /**
574 \brief
575 Return the sample data for the given sample no. This is not the
576 preferred interface but is provided for use by C code.
577 The bufferg parameter is only required for LazyData.
578 \param sampleNo - Input - the given sample no.
579 \param bufferg - Input - group from which a buffer for computing (and storing) the sample data will be selected.
580 \return pointer to the sample data.
581 */
582 ESCRIPT_DLL_API
583 inline
584 const DataAbstract::ValueType::value_type*
585 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
586
587
588 /**
589 \brief
590 Return the sample data for the given sample no. This is not the
591 preferred interface but is provided for use by C code.
592 \param sampleNo - Input - the given sample no.
593 \return pointer to the sample data.
594 */
595 ESCRIPT_DLL_API
596 inline
597 DataAbstract::ValueType::value_type*
598 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
599
600
601 /**
602 \brief
603 Return the sample data for the given tag. If an attempt is made to
604 access data that isn't tagged an exception will be thrown.
605 \param tag - Input - the tag key.
606 */
607 ESCRIPT_DLL_API
608 inline
609 DataAbstract::ValueType::value_type*
610 getSampleDataByTag(int tag)
611 {
612 return m_data->getSampleDataByTag(tag);
613 }
614
615 /**
616 \brief
617 Return a reference into the DataVector which points to the specified data point.
618 \param sampleNo - Input -
619 \param dataPointNo - Input -
620 */
621 ESCRIPT_DLL_API
622 DataTypes::ValueType::const_reference
623 getDataPointRO(int sampleNo, int dataPointNo);
624
625 /**
626 \brief
627 Return a reference into the DataVector which points to the specified data point.
628 \param sampleNo - Input -
629 \param dataPointNo - Input -
630 */
631 ESCRIPT_DLL_API
632 DataTypes::ValueType::reference
633 getDataPointRW(int sampleNo, int dataPointNo);
634
635
636
637 /**
638 \brief
639 Return the offset for the given sample and point within the sample
640 */
641 ESCRIPT_DLL_API
642 inline
643 DataTypes::ValueType::size_type
644 getDataOffset(int sampleNo,
645 int dataPointNo)
646 {
647 return m_data->getPointOffset(sampleNo,dataPointNo);
648 }
649
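/* Editorial usage sketch (not part of the original header). Locating one data
   point through its offset and reading its first component; "d", sampleNo and
   dataPointNo are hypothetical, valid names and indices.

     escript::DataTypes::ValueType::size_type off = d.getDataOffset(sampleNo, dataPointNo);
     double first = d.getDataAtOffsetRO(off);       // first value of that data point
*/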
650 /**
651 \brief
652 Return a reference to the data point shape.
653 */
654 ESCRIPT_DLL_API
655 inline
656 const DataTypes::ShapeType&
657 getDataPointShape() const
658 {
659 return m_data->getShape();
660 }
661
662 /**
663 \brief
664 Return the data point shape as a tuple of integers.
665 */
666 ESCRIPT_DLL_API
667 const boost::python::tuple
668 getShapeTuple() const;
669
670 /**
671 \brief
672 Return the size of the data point. It is the product of the
673 data point shape dimensions.
674 */
675 ESCRIPT_DLL_API
676 int
677 getDataPointSize() const;
678
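/* Editorial usage sketch (not part of the original header). Shape queries on a
   hypothetical Data object "d" whose points have shape (3,3): the rank is 2 and
   the point size is 9.

     unsigned int rank = d.getDataPointRank();      // 2 for a (3,3) point
     int size          = d.getDataPointSize();      // 9 = product of the shape dims
*/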
679 /**
680 \brief
681 Return the number of doubles stored for this Data.
682 */
683 ESCRIPT_DLL_API
684 DataTypes::ValueType::size_type
685 getLength() const;
686
687
688
689 /**
690 \brief
691 Assign the given value to the tag associated with name. Implicitly converts this
692 object to type DataTagged. Throws an exception if this object
693 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
694 \param name - Input - name of tag.
695 \param value - Input - Value to associate with given key.
696 */
697 ESCRIPT_DLL_API
698 void
699 setTaggedValueByName(std::string name,
700 const boost::python::object& value);
701
702 /**
703 \brief
704 Assign the given value to the tag. Implicitly converts this
705 object to type DataTagged if it is constant.
706
707 \param tagKey - Input - Integer key.
708 \param value - Input - Value to associate with given key.
709 *
710 */
711 ESCRIPT_DLL_API
712 void
713 setTaggedValue(int tagKey,
714 const boost::python::object& value);
715
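/* Editorial usage sketch (not part of the original header). Assigning a value
   to a tag; constant data is implicitly converted to DataTagged. The tag key,
   the value and the object "d" are hypothetical.

     d.tag();                                             // ensure tagged storage
     d.setTaggedValue(1, boost::python::object(42.0));    // points tagged 1 become 42
*/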
716 /**
717 \brief
718 Assign the given value to the tag. Implicitly converts this
719 object to type DataTagged if it is constant.
720
721 \param tagKey - Input - Integer key.
722 \param pointshape - Input - The shape of the value parameter
723 \param value - Input - Value to associate with given key.
724 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
725 */
726 ESCRIPT_DLL_API
727 void
728 setTaggedValueFromCPP(int tagKey,
729 const DataTypes::ShapeType& pointshape,
730 const DataTypes::ValueType& value,
731 int dataOffset=0);
732
733
734
735 /**
736 \brief
737 Copy other Data object into this Data object where mask is positive.
738 */
739 ESCRIPT_DLL_API
740 void
741 copyWithMask(const Data& other,
742 const Data& mask);
743
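/* Editorial usage sketch (not part of the original header). Conditional copy:
   values from "other" replace values of "d" wherever "mask" is positive. Here
   the mask is built from a hypothetical Data object "x" with wherePositive().

     escript::Data mask = x.wherePositive();    // 1 where x > 0, 0 elsewhere
     d.copyWithMask(other, mask);               // d takes values of other where mask > 0
*/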
744 /**
745 Data object operation methods and operators.
746 */
747
748 /**
749 \brief
750 set all values to zero
751 *
752 */
753 ESCRIPT_DLL_API
754 void
755 setToZero();
756
757 /**
758 \brief
759 Interpolates this onto the given functionspace and returns
760 the result as a Data object.
761 *
762 */
763 ESCRIPT_DLL_API
764 Data
765 interpolate(const FunctionSpace& functionspace) const;
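/* Editorial usage sketch (not part of the original header). Interpolation onto
   another function space, guarded by probeInterpolation(); "d" and the target
   FunctionSpace "fs" are hypothetical.

     if (d.probeInterpolation(fs)) {
       escript::Data e = d.interpolate(fs);     // new Data living on fs
     }
*/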
766 /**
767 \brief
768 Calculates the gradient of the data at the data points of functionspace.
769 If functionspace is not present the function space of Function(getDomain()) is used.
770 *
771 */
772 ESCRIPT_DLL_API
773 Data
774 gradOn(const FunctionSpace& functionspace) const;
775
776 ESCRIPT_DLL_API
777 Data
778 grad() const;
779
780 /**
781 \brief
782 Calculate the integral over the function space domain as a python tuple.
783 */
784 ESCRIPT_DLL_API
785 boost::python::object
786 integrateToTuple_const() const;
787
788
789 /**
790 \brief
791 Calculate the integral over the function space domain as a python tuple.
792 */
793 ESCRIPT_DLL_API
794 boost::python::object
795 integrateToTuple();
796
797
798
799 /**
800 \brief
801 Return the reciprocal (1./value) of each data point of this Data object.
802 *
803 */
804 ESCRIPT_DLL_API
805 Data
806 oneOver() const;
807 /**
808 \brief
809 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
810 *
811 */
812 ESCRIPT_DLL_API
813 Data
814 wherePositive() const;
815
816 /**
817 \brief
818 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
819 *
820 */
821 ESCRIPT_DLL_API
822 Data
823 whereNegative() const;
824
825 /**
826 \brief
827 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
828 *
829 */
830 ESCRIPT_DLL_API
831 Data
832 whereNonNegative() const;
833
834 /**
835 \brief
836 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
837 *
838 */
839 ESCRIPT_DLL_API
840 Data
841 whereNonPositive() const;
842
843 /**
844 \brief
845 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
846 *
847 */
848 ESCRIPT_DLL_API
849 Data
850 whereZero(double tol=0.0) const;
851
852 /**
853 \brief
854 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
855 *
856 */
857 ESCRIPT_DLL_API
858 Data
859 whereNonZero(double tol=0.0) const;
860
861 /**
862 \brief
863 Return the maximum absolute value of this Data object.
864
865 The method is not const because lazy data needs to be expanded before Lsup can be computed.
866 The _const form can be used when the Data object is const, however this will only work for
867 Data which is not Lazy.
868
869 For Data which contain no samples (or tagged Data for which no tags in use have a value)
870 zero is returned.
871 */
872 ESCRIPT_DLL_API
873 double
874 Lsup();
875
876 ESCRIPT_DLL_API
877 double
878 Lsup_const() const;
879
880
881 /**
882 \brief
883 Return the maximum value of this Data object.
884
885 The method is not const because lazy data needs to be expanded before sup can be computed.
886 The _const form can be used when the Data object is const, however this will only work for
887 Data which is not Lazy.
888
889 For Data which contain no samples (or tagged Data for which no tags in use have a value)
890 a large negative value is returned.
891 */
892 ESCRIPT_DLL_API
893 double
894 sup();
895
896 ESCRIPT_DLL_API
897 double
898 sup_const() const;
899
900
901 /**
902 \brief
903 Return the minimum value of this Data object.
904
905 The method is not const because lazy data needs to be expanded before inf can be computed.
906 The _const form can be used when the Data object is const, however this will only work for
907 Data which is not Lazy.
908
909 For Data which contain no samples (or tagged Data for which no tags in use have a value)
910 a large positive value is returned.
911 */
912 ESCRIPT_DLL_API
913 double
914 inf();
915
916 ESCRIPT_DLL_API
917 double
918 inf_const() const;
919
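/* Editorial usage sketch (not part of the original header). Global reductions
   over all data points of a hypothetical Data object "d"; the non-const forms
   may resolve lazy data first.

     double maxabs = d.Lsup();    // largest absolute value
     double hi     = d.sup();     // largest value
     double lo     = d.inf();     // smallest value
*/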
920
921
922 /**
923 \brief
924 Return the absolute value of each data point of this Data object.
925 *
926 */
927 ESCRIPT_DLL_API
928 Data
929 abs() const;
930
931 /**
932 \brief
933 Return the maximum value of each data point of this Data object.
934 *
935 */
936 ESCRIPT_DLL_API
937 Data
938 maxval() const;
939
940 /**
941 \brief
942 Return the minimum value of each data point of this Data object.
943 *
944 */
945 ESCRIPT_DLL_API
946 Data
947 minval() const;
948
949 /**
950 \brief
951 Return the (sample number, data-point number) of the data point with
952 the minimum component value in this Data object.
953 \note If you are working in python, please consider using Locator
954 instead of manually manipulating process and point IDs.
955 */
956 ESCRIPT_DLL_API
957 const boost::python::tuple
958 minGlobalDataPoint() const;
959
960 /**
961 \brief
962 Return the (sample number, data-point number) of the data point with
963 the maximum component value in this Data object.
964 \note If you are working in python, please consider using Locator
965 instead of manually manipulating process and point IDs.
966 */
967 ESCRIPT_DLL_API
968 const boost::python::tuple
969 maxGlobalDataPoint() const;
970
971
972
973 /**
974 \brief
975 Return the sign of each data point of this Data object.
976 -1 for negative values, zero for zero values, 1 for positive values.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 sign() const;
982
983 /**
984 \brief
985 Return the symmetric part of a matrix which is half the matrix plus its transpose.
986 *
987 */
988 ESCRIPT_DLL_API
989 Data
990 symmetric() const;
991
992 /**
993 \brief
994 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
995 *
996 */
997 ESCRIPT_DLL_API
998 Data
999 nonsymmetric() const;
1000
1001 /**
1002 \brief
1003 Return the trace of a matrix
1004 *
1005 */
1006 ESCRIPT_DLL_API
1007 Data
1008 trace(int axis_offset) const;
1009
1010 /**
1011 \brief
1012 Transpose each data point of this Data object around the given axis.
1013 *
1014 */
1015 ESCRIPT_DLL_API
1016 Data
1017 transpose(int axis_offset) const;
1018
1019 /**
1020 \brief
1021 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1022 Currently this function is restricted to rank 2, square shape, and dimension 3.
1023 *
1024 */
1025 ESCRIPT_DLL_API
1026 Data
1027 eigenvalues() const;
1028
1029 /**
1030 \brief
1031 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1032 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1033 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1034 first non-zero entry is positive.
1035 Currently this function is restricted to rank 2, square shape, and dimension 3
1036 *
1037 */
1038 ESCRIPT_DLL_API
1039 const boost::python::tuple
1040 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1041
1042 /**
1043 \brief
1044 swaps the components axis0 and axis1
1045 *
1046 */
1047 ESCRIPT_DLL_API
1048 Data
1049 swapaxes(const int axis0, const int axis1) const;
1050
1051 /**
1052 \brief
1053 Return the error function erf of each data point of this Data object.
1054 *
1055 */
1056 ESCRIPT_DLL_API
1057 Data
1058 erf() const;
1059
1060 /**
1061 \brief
1062 Return the sin of each data point of this Data object.
1063 *
1064 */
1065 ESCRIPT_DLL_API
1066 Data
1067 sin() const;
1068
1069 /**
1070 \brief
1071 Return the cos of each data point of this Data object.
1072 *
1073 */
1074 ESCRIPT_DLL_API
1075 Data
1076 cos() const;
1077
1078 /**
1079 \brief
1080 Return the tan of each data point of this Data object.
1081 *
1082 */
1083 ESCRIPT_DLL_API
1084 Data
1085 tan() const;
1086
1087 /**
1088 \brief
1089 Return the asin of each data point of this Data object.
1090 *
1091 */
1092 ESCRIPT_DLL_API
1093 Data
1094 asin() const;
1095
1096 /**
1097 \brief
1098 Return the acos of each data point of this Data object.
1099 *
1100 */
1101 ESCRIPT_DLL_API
1102 Data
1103 acos() const;
1104
1105 /**
1106 \brief
1107 Return the atan of each data point of this Data object.
1108 *
1109 */
1110 ESCRIPT_DLL_API
1111 Data
1112 atan() const;
1113
1114 /**
1115 \brief
1116 Return the sinh of each data point of this Data object.
1117 *
1118 */
1119 ESCRIPT_DLL_API
1120 Data
1121 sinh() const;
1122
1123 /**
1124 \brief
1125 Return the cosh of each data point of this Data object.
1126 *
1127 */
1128 ESCRIPT_DLL_API
1129 Data
1130 cosh() const;
1131
1132 /**
1133 \brief
1134 Return the tanh of each data point of this Data object.
1135 *
1136 */
1137 ESCRIPT_DLL_API
1138 Data
1139 tanh() const;
1140
1141 /**
1142 \brief
1143 Return the asinh of each data point of this Data object.
1144 *
1145 */
1146 ESCRIPT_DLL_API
1147 Data
1148 asinh() const;
1149
1150 /**
1151 \brief
1152 Return the acosh of each data point of this Data object.
1153 *
1154 */
1155 ESCRIPT_DLL_API
1156 Data
1157 acosh() const;
1158
1159 /**
1160 \brief
1161 Return the atanh of each data point of this Data object.
1162 *
1163 */
1164 ESCRIPT_DLL_API
1165 Data
1166 atanh() const;
1167
1168 /**
1169 \brief
1170 Return the log to base 10 of each data point of this Data object.
1171 *
1172 */
1173 ESCRIPT_DLL_API
1174 Data
1175 log10() const;
1176
1177 /**
1178 \brief
1179 Return the natural log of each data point of this Data object.
1180 *
1181 */
1182 ESCRIPT_DLL_API
1183 Data
1184 log() const;
1185
1186 /**
1187 \brief
1188 Return the exponential function of each data point of this Data object.
1189 *
1190 */
1191 ESCRIPT_DLL_API
1192 Data
1193 exp() const;
1194
1195 /**
1196 \brief
1197 Return the square root of each data point of this Data object.
1198 *
1199 */
1200 ESCRIPT_DLL_API
1201 Data
1202 sqrt() const;
1203
1204 /**
1205 \brief
1206 Return the negation of each data point of this Data object.
1207 *
1208 */
1209 ESCRIPT_DLL_API
1210 Data
1211 neg() const;
1212
1213 /**
1214 \brief
1215 Return the identity of each data point of this Data object.
1216 Simply returns this object unmodified.
1217 *
1218 */
1219 ESCRIPT_DLL_API
1220 Data
1221 pos() const;
1222
1223 /**
1224 \brief
1225 Return each data point of this Data object raised to the given power.
1226
1227 \param right Input - the power to raise the object to.
1228 *
1229 */
1230 ESCRIPT_DLL_API
1231 Data
1232 powD(const Data& right) const;
1233
1234 /**
1235 \brief
1236 Return each data point of this Data object raised to the power given by a boost python object.
1237
1238 \param right Input - the power to raise the object to.
1239 *
1240 */
1241 ESCRIPT_DLL_API
1242 Data
1243 powO(const boost::python::object& right) const;
1244
1245 /**
1246 \brief
1247 Return the given bases (a boost python object) raised to the power of each data point of this Data object.
1248
1249 \param left Input - the bases
1250 *
1251 */
1252
1253 ESCRIPT_DLL_API
1254 Data
1255 rpowO(const boost::python::object& left) const;
1256
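/* Editorial usage sketch (not part of the original header). Pointwise powers:
   powD takes a Data exponent, powO a Python exponent, and rpowO uses the Python
   object as the base. "d", "base" and "expo" are hypothetical.

     escript::Data squared = d.powO(boost::python::object(2));   // d ** 2
     escript::Data mixed   = base.powD(expo);                    // base ** expo
*/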
1257 /**
1258 \brief
1259 writes the object to a file in the DX file format
1260 */
1261 ESCRIPT_DLL_API
1262 void
1263 saveDX(std::string fileName) const;
1264
1265 /**
1266 \brief
1267 writes the object to a file in the VTK file format
1268 */
1269 ESCRIPT_DLL_API
1270 void
1271 saveVTK(std::string fileName) const;
1272
1273 /**
1274 \brief
1275 Overloaded operator +=
1276 \param right - Input - The right hand side.
1277 *
1278 */
1279 ESCRIPT_DLL_API
1280 Data& operator+=(const Data& right);
1281 ESCRIPT_DLL_API
1282 Data& operator+=(const boost::python::object& right);
1283
1284 ESCRIPT_DLL_API
1285 Data& operator=(const Data& other);
1286
1287 /**
1288 \brief
1289 Overloaded operator -=
1290 \param right - Input - The right hand side.
1291 *
1292 */
1293 ESCRIPT_DLL_API
1294 Data& operator-=(const Data& right);
1295 ESCRIPT_DLL_API
1296 Data& operator-=(const boost::python::object& right);
1297
1298 /**
1299 \brief
1300 Overloaded operator *=
1301 \param right - Input - The right hand side.
1302 *
1303 */
1304 ESCRIPT_DLL_API
1305 Data& operator*=(const Data& right);
1306 ESCRIPT_DLL_API
1307 Data& operator*=(const boost::python::object& right);
1308
1309 /**
1310 \brief
1311 Overloaded operator /=
1312 \param right - Input - The right hand side.
1313 *
1314 */
1315 ESCRIPT_DLL_API
1316 Data& operator/=(const Data& right);
1317 ESCRIPT_DLL_API
1318 Data& operator/=(const boost::python::object& right);
1319
1320 /**
1321 \brief
1322 Returns true if this can be interpolated to functionspace.
1323 */
1324 ESCRIPT_DLL_API
1325 bool
1326 probeInterpolation(const FunctionSpace& functionspace) const;
1327
1328 /**
1329 Data object slicing methods.
1330 */
1331
1332 /**
1333 \brief
1334 Returns a slice from this Data object.
1335
1336 Description:
1337 Implements the [] get operator in python.
1338 Calls getSlice.
1339
1340 \param key - Input - python slice tuple specifying
1341 slice to return.
1342 */
1343 ESCRIPT_DLL_API
1344 Data
1345 getItem(const boost::python::object& key) const;
1346
1347 /**
1348 \brief
1349 Copies slice from value into this Data object.
1350
1351 Implements the [] set operator in python.
1352 Calls setSlice.
1353
1354 \param key - Input - python slice tuple specifying
1355 slice to copy from value.
1356 \param value - Input - Data object to copy from.
1357 */
1358 ESCRIPT_DLL_API
1359 void
1360 setItemD(const boost::python::object& key,
1361 const Data& value);
1362
1363 ESCRIPT_DLL_API
1364 void
1365 setItemO(const boost::python::object& key,
1366 const boost::python::object& value);
1367
1368 // These following public methods should be treated as private.
1369
1370 /**
1371 \brief
1372 Perform the given unary operation on every element of every data point in
1373 this Data object.
1374 */
1375 template <class UnaryFunction>
1376 ESCRIPT_DLL_API
1377 inline
1378 void
1379 unaryOp2(UnaryFunction operation);
1380
1381 /**
1382 \brief
1383 Return a Data object containing the specified slice of
1384 this Data object.
1385 \param region - Input - Region to copy.
1386 *
1387 */
1388 ESCRIPT_DLL_API
1389 Data
1390 getSlice(const DataTypes::RegionType& region) const;
1391
1392 /**
1393 \brief
1394 Copy the specified slice from the given value into this
1395 Data object.
1396 \param value - Input - Data to copy from.
1397 \param region - Input - Region to copy.
1398 *
1399 */
1400 ESCRIPT_DLL_API
1401 void
1402 setSlice(const Data& value,
1403 const DataTypes::RegionType& region);
1404
1405 /**
1406 \brief
1407 print the data values to stdout. Used for debugging
1408 */
1409 ESCRIPT_DLL_API
1410 void
1411 print(void);
1412
1413 /**
1414 \brief
1415 return the MPI rank number of the local data
1416 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1417 is returned
1418 */
1419 ESCRIPT_DLL_API
1420 int
1421 get_MPIRank(void) const;
1422
1423 /**
1424 \brief
1425 return the number of MPI processes for the local data
1426 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1427 is returned
1428 */
1429 ESCRIPT_DLL_API
1430 int
1431 get_MPISize(void) const;
1432
1433 /**
1434 \brief
1435 return the MPI communicator used by the local data
1436 MPI_COMM_WORLD is assumed and returned.
1437 */
1438 ESCRIPT_DLL_API
1439 MPI_Comm
1440 get_MPIComm(void) const;
1441
1442 /**
1443 \brief
1444 return the object produced by the factory, which is a DataConstant or DataExpanded
1445 TODO Ownership of this object should be explained in doco.
1446 */
1447 ESCRIPT_DLL_API
1448 DataAbstract*
1449 borrowData(void) const;
1450
1451 ESCRIPT_DLL_API
1452 DataAbstract_ptr
1453 borrowDataPtr(void) const;
1454
1455 ESCRIPT_DLL_API
1456 DataReady_ptr
1457 borrowReadyPtr(void) const;
1458
1459
1460
1461 /**
1462 \brief
1463 Return a pointer to the beginning of the datapoint at the specified offset.
1464 TODO Eventually these should be inlined.
1465 \param i - position(offset) in the underlying datastructure
1466 */
1467
1468 ESCRIPT_DLL_API
1469 DataTypes::ValueType::const_reference
1470 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1471
1472
1473 ESCRIPT_DLL_API
1474 DataTypes::ValueType::reference
1475 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1476
1477
1478
1479 /**
1480 \brief Create a buffer for use by getSample
1481 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1482 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1483
1484 In multi-threaded sections, this needs to be called on each thread.
1485
1486 \return A BufferGroup* if Data is lazy, NULL otherwise.
1487 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1488 */
1489 ESCRIPT_DLL_API
1490 BufferGroup*
1491 allocSampleBuffer() const;
1492
1493 /**
1494 \brief Free a buffer allocated with allocSampleBuffer.
1495 \param buffer Input - pointer to the buffer to deallocate.
1496 */
1497 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
1498
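/* Editorial usage sketch (not part of the original header). Read-only access to
   (possibly lazy) data using a buffer group; the buffer must be released with
   freeSampleBuffer(). "d" is a hypothetical Data object.

     escript::BufferGroup* bg = d.allocSampleBuffer();    // NULL unless d is lazy
     const double* p = d.getSampleDataRO(0, bg);          // sample 0, read only
     // ... use p ...
     d.freeSampleBuffer(bg);                              // release the buffer
*/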
1499 protected:
1500
1501 private:
1502
1503 double
1504 LsupWorker() const;
1505
1506 double
1507 supWorker() const;
1508
1509 double
1510 infWorker() const;
1511
1512 boost::python::object
1513 integrateWorker() const;
1514
1515 void
1516 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1517
1518 void
1519 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1520
1521
1522 /**
1523 \brief
1524 Check *this and the right operand are compatible. Throws
1525 an exception if they aren't.
1526 \param right - Input - The right hand side.
1527 */
1528 inline
1529 void
1530 operandCheck(const Data& right) const
1531 {
1532 return m_data->operandCheck(*(right.m_data.get()));
1533 }
1534
1535 /**
1536 \brief
1537 Perform the specified reduction algorithm on every element of every data point in
1538 this Data object according to the given function and return the single value result.
1539 */
1540 template <class BinaryFunction>
1541 inline
1542 double
1543 algorithm(BinaryFunction operation,
1544 double initial_value) const;
1545
1546 /**
1547 \brief
1548 Reduce each data-point in this Data object using the given operation. Return a Data
1549 object with the same number of data-points, but with each data-point containing only
1550 one value - the result of the reduction operation on the corresponding data-point in
1551 this Data object
1552 */
1553 template <class BinaryFunction>
1554 inline
1555 Data
1556 dp_algorithm(BinaryFunction operation,
1557 double initial_value) const;
1558
1559 /**
1560 \brief
1561 Perform the given binary operation on all of the data's elements.
1562 The underlying type of the right hand side (right) determines the final
1563 type of *this after the operation. For example if the right hand side
1564 is expanded *this will be expanded if necessary.
1565 RHS is a Data object.
1566 */
1567 template <class BinaryFunction>
1568 inline
1569 void
1570 binaryOp(const Data& right,
1571 BinaryFunction operation);
1572
1573 /**
1574 \brief
1575 Convert the data type of the RHS to match this.
1576 \param right - Input - data type to match.
1577 */
1578 void
1579 typeMatchLeft(Data& right) const;
1580
1581 /**
1582 \brief
1583 Convert the data type of this to match the RHS.
1584 \param right - Input - data type to match.
1585 */
1586 void
1587 typeMatchRight(const Data& right);
1588
1589 /**
1590 \brief
1591 Construct a Data object of the appropriate type.
1592 */
1593
1594 void
1595 initialise(const DataTypes::ValueType& value,
1596 const DataTypes::ShapeType& shape,
1597 const FunctionSpace& what,
1598 bool expanded);
1599
1600 void
1601 initialise(const WrappedArray& value,
1602 const FunctionSpace& what,
1603 bool expanded);
1604
1605 //
1606 // flag to protect the data object against any update
1607 bool m_protected;
1608 mutable bool m_shared;
1609 bool m_lazy;
1610
1611 //
1612 // pointer to the actual data object
1613 // boost::shared_ptr<DataAbstract> m_data;
1614 DataAbstract_ptr m_data;
1615
1616 // If possible please use getReadyPtr instead.
1617 // But see warning below.
1618 const DataReady*
1619 getReady() const;
1620
1621 DataReady*
1622 getReady();
1623
1624
1625 // Be wary of using this for local operations since it (temporarily) increases reference count.
1626 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1627 // getReady() instead
1628 DataReady_ptr
1629 getReadyPtr();
1630
1631 const_DataReady_ptr
1632 getReadyPtr() const;
1633
1634
1635 /**
1636 \brief Update the Data's shared flag
1637 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1638 For internal use only.
1639 */
1640 void updateShareStatus(bool nowshared) const
1641 {
1642 m_shared=nowshared; // m_shared is mutable
1643 }
1644
1645 // In the isShared() method below:
1646 // A problem would occur if m_data (the address pointed to) were being modified
1647 // while the call m_data->is_shared is being executed.
1648 //
1649 // Q: So why do I think this code can be thread safe/correct?
1650 // A: We need to make some assumptions.
1651 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1652 // 2. We assume that no constructions or assignments which will share previously unshared
1653 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1654 //
1655 // This means that the only transition we need to consider, is when a previously shared object is
1656 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1657 // In those cases the m_shared flag changes to false after m_data has completed changing.
1658 // For any threads executing before the flag switches they will assume the object is still shared.
1659 bool isShared() const
1660 {
1661 return m_shared;
1662 /* if (m_shared) return true;
1663 if (m_data->isShared())
1664 {
1665 updateShareStatus(true);
1666 return true;
1667 }
1668 return false;*/
1669 }
1670
1671 void forceResolve()
1672 {
1673 if (isLazy())
1674 {
1675 #ifdef _OPENMP
1676 if (omp_in_parallel())
1677 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1678 throw DataException("Please do not call forceResolve() in a parallel region.");
1679 }
1680 #endif
1681 resolve();
1682 }
1683 }
1684
1685 /**
1686 \brief if another object is sharing our member data, make a copy to work with instead.
1687 This code should only be called from single threaded sections of code.
1688 */
1689 void exclusiveWrite()
1690 {
1691 #ifdef _OPENMP
1692 if (omp_in_parallel())
1693 {
1694 // *((int*)0)=17;
1695 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1696 }
1697 #endif
1698 forceResolve();
1699 if (isShared())
1700 {
1701 DataAbstract* t=m_data->deepCopy();
1702 set_m_data(DataAbstract_ptr(t));
1703 }
1704 }
1705
1706 /**
1707 \brief checks if caller can have exclusive write to the object
1708 */
1709 void checkExclusiveWrite()
1710 {
1711 if (isLazy() || isShared())
1712 {
1713 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1714 }
1715 }
1716
1717 /**
1718 \brief Modify the data abstract hosted by this Data object
1719 For internal use only.
1720 Passing a pointer to null is permitted (do this in the destructor)
1721 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1722 */
1723 void set_m_data(DataAbstract_ptr p);
1724
1725 friend class DataAbstract; // To allow calls to updateShareStatus
1726
1727 };
1728
1729 } // end namespace escript
1730
1731
1732 // No, this is not supposed to be at the top of the file
1733 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1734 // so that I can dynamic cast between them below.
1735 #include "DataReady.h"
1736 #include "DataLazy.h"
1737
1738 namespace escript
1739 {
1740
1741 inline
1742 const DataReady*
1743 Data::getReady() const
1744 {
1745 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1746 EsysAssert((dr!=0), "Error - casting to DataReady.");
1747 return dr;
1748 }
1749
1750 inline
1751 DataReady*
1752 Data::getReady()
1753 {
1754 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1755 EsysAssert((dr!=0), "Error - casting to DataReady.");
1756 return dr;
1757 }
1758
1759 // Be wary of using this for local operations since it (temporarily) increases reference count.
1760 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1761 // getReady() instead
1762 inline
1763 DataReady_ptr
1764 Data::getReadyPtr()
1765 {
1766 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1767 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1768 return dr;
1769 }
1770
1771
1772 inline
1773 const_DataReady_ptr
1774 Data::getReadyPtr() const
1775 {
1776 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1777 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1778 return dr;
1779 }
1780
1781 inline
1782 DataAbstract::ValueType::value_type*
1783 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1784 {
1785 // if (isLazy())
1786 // {
1787 // resolve();
1788 // }
1789 // exclusiveWrite();
1790 if (isLazy())
1791 {
1792 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1793 }
1794 return getReady()->getSampleDataRW(sampleNo);
1795 }
1796
1797 // inline
1798 // const DataAbstract::ValueType::value_type*
1799 // Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer)
1800 // {
1801 // DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1802 // if (l!=0)
1803 // {
1804 // size_t offset=0;
1805 // if (buffer==NULL)
1806 // {
1807 // throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1808 // }
1809 // const DataTypes::ValueType* res=l->resolveSample(*buffer,0,sampleNo,offset);
1810 // return &((*res)[offset]);
1811 // }
1812 // return getReady()->getSampleDataRO(sampleNo);
1813 // }
1814
1815 inline
1816 const DataAbstract::ValueType::value_type*
1817 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1818 {
1819 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1820 if (l!=0)
1821 {
1822 size_t offset=0;
1823 if (bufferg==NULL)
1824 {
1825 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1826 }
1827 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1828 return &((*res)[offset]);
1829 }
1830 return getReady()->getSampleDataRO(sampleNo);
1831 }
1832
1833
1834
1835 /**
1836 Modify a filename for MPI parallel output to multiple files
1837 */
1838 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1839
1840 /**
1841 Binary Data object operators.
1842 */
1843 inline double rpow(double x,double y)
1844 {
1845 return pow(y,x);
1846 }
1847
1848 /**
1849 \brief
1850 Operator+
1851 Takes two Data objects.
1852 */
1853 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1854
1855 /**
1856 \brief
1857 Operator-
1858 Takes two Data objects.
1859 */
1860 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1861
1862 /**
1863 \brief
1864 Operator*
1865 Takes two Data objects.
1866 */
1867 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1868
1869 /**
1870 \brief
1871 Operator/
1872 Takes two Data objects.
1873 */
1874 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1875
1876 /**
1877 \brief
1878 Operator+
1879 Takes LHS Data object and RHS python::object.
1880 python::object must be convertible to Data type.
1881 */
1882 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1883
1884 /**
1885 \brief
1886 Operator-
1887 Takes LHS Data object and RHS python::object.
1888 python::object must be convertible to Data type.
1889 */
1890 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1891
1892 /**
1893 \brief
1894 Operator*
1895 Takes LHS Data object and RHS python::object.
1896 python::object must be convertible to Data type.
1897 */
1898 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1899
1900 /**
1901 \brief
1902 Operator/
1903 Takes LHS Data object and RHS python::object.
1904 python::object must be convertible to Data type.
1905 */
1906 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1907
1908 /**
1909 \brief
1910 Operator+
1911 Takes LHS python::object and RHS Data object.
1912 python::object must be convertible to Data type.
1913 */
1914 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1915
1916 /**
1917 \brief
1918 Operator-
1919 Takes LHS python::object and RHS Data object.
1920 python::object must be convertible to Data type.
1921 */
1922 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1923
1924 /**
1925 \brief
1926 Operator*
1927 Takes LHS python::object and RHS Data object.
1928 python::object must be convertible to Data type.
1929 */
1930 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1931
1932 /**
1933 \brief
1934 Operator/
1935 Takes LHS python::object and RHS Data object.
1936 python::object must be convertible to Data type.
1937 */
1938 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1939
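/* Editorial usage sketch (not part of the original header). Mixed arithmetic
   between Data objects and Python objects; "d" and "e" are hypothetical.

     escript::Data sum  = d + e;                            // Data + Data
     escript::Data half = d / boost::python::object(2.0);   // Data / python object
     d += e;                                                // in-place update
*/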
1940
1941
1942 /**
1943 \brief
1944 Output operator
1945 */
1946 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1947
1948 /**
1949 \brief
1950 Compute a tensor product of two Data objects
1951 \param arg_0 - Input - Data object
1952 \param arg_1 - Input - Data object
1953 \param axis_offset - Input - axis offset
1954 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1955 */
1956 ESCRIPT_DLL_API
1957 Data
1958 C_GeneralTensorProduct(Data& arg_0,
1959 Data& arg_1,
1960 int axis_offset=0,
1961 int transpose=0);
1962
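/* Editorial usage sketch (not part of the original header). A matrix-vector
   style contraction via C_GeneralTensorProduct; the shapes, the value of
   axis_offset and the argument names are hypothetical.

     // A has point shape (3,3), v has point shape (3); contract over one index.
     escript::Data Av = escript::C_GeneralTensorProduct(A, v, 1, 0);
*/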
1963 /**
1964 \brief
1965 Perform the given binary operation with this and right as operands.
1966 Right is a Data object.
1967 */
1968 template <class BinaryFunction>
1969 inline
1970 void
1971 Data::binaryOp(const Data& right,
1972 BinaryFunction operation)
1973 {
1974 //
1975 // a rank-zero object cannot be updated with a higher-rank RHS; refuse the operation
1976 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1977 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1978 }
1979
1980 if (isLazy() || right.isLazy())
1981 {
1982 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1983 }
1984 //
1985 // initially make the temporary a shallow copy
1986 Data tempRight(right);
1987
1988 if (getFunctionSpace()!=right.getFunctionSpace()) {
1989 if (right.probeInterpolation(getFunctionSpace())) {
1990 //
1991 // an interpolation is required so create a new Data
1992 tempRight=Data(right,this->getFunctionSpace());
1993 } else if (probeInterpolation(right.getFunctionSpace())) {
1994 //
1995 // interpolate onto the RHS function space
1996 Data tempLeft(*this,right.getFunctionSpace());
1997 // m_data=tempLeft.m_data;
1998 set_m_data(tempLeft.m_data);
1999 }
2000 }
2001 operandCheck(tempRight);
2002 //
2003 // ensure this has the right type for the RHS
2004 typeMatchRight(tempRight);
2005 //
2006 // Need to cast to the concrete types so that the correct binaryOp
2007 // is called.
2008 if (isExpanded()) {
2009 //
2010 // Expanded data will be done in parallel, the right hand side can be
2011 // of any data type
2012 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2013 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2014 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2015 } else if (isTagged()) {
2016 //
2017 // Tagged data is operated on serially, the right hand side can be
2018 // either DataConstant or DataTagged
2019 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2020 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2021 if (right.isTagged()) {
2022 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2023 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2024 escript::binaryOp(*leftC,*rightC,operation);
2025 } else {
2026 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2027 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2028 escript::binaryOp(*leftC,*rightC,operation);
2029 }
2030 } else if (isConstant()) {
2031 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2032 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2033 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2034 escript::binaryOp(*leftC,*rightC,operation);
2035 }
2036 }
2037
2038 /**
2039 \brief
2040 Perform the given Data object reduction algorithm on this and return the result.
2041 Given operation combines each element of each data point, thus argument
2042 object (*this) is a rank n Data object, and returned object is a scalar.
2043 Calls escript::algorithm.
2044 */
2045 template <class BinaryFunction>
2046 inline
2047 double
2048 Data::algorithm(BinaryFunction operation, double initial_value) const
2049 {
2050 if (isExpanded()) {
2051 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2052 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2053 return escript::algorithm(*leftC,operation,initial_value);
2054 } else if (isTagged()) {
2055 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2056 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2057 return escript::algorithm(*leftC,operation,initial_value);
2058 } else if (isConstant()) {
2059 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2060 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2061 return escript::algorithm(*leftC,operation,initial_value);
2062 } else if (isEmpty()) {
2063 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2064 } else if (isLazy()) {
2065 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2066 } else {
2067 throw DataException("Error - Data encapsulates an unknown type.");
2068 }
2069 }
2070
2071 /**
2072 \brief
2073 Perform the given data point reduction algorithm on data and return the result.
2074 Given operation combines each element within each data point into a scalar,
2075 thus argument object is a rank n Data object, and returned object is a
2076 rank 0 Data object.
2077 Calls escript::dp_algorithm.
2078 */
2079 template <class BinaryFunction>
2080 inline
2081 Data
2082 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2083 {
2084 if (isEmpty()) {
2085 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2086 }
2087 else if (isExpanded()) {
2088 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2089 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2090 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2091 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2092 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2093 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2094 return result;
2095 }
2096 else if (isTagged()) {
2097 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2098 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2099 DataTypes::ValueType defval(1);
2100 defval[0]=0;
2101 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2102 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2103 return Data(resultT); // note: the Data object now owns the resultT pointer
2104 }
2105 else if (isConstant()) {
2106 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2107 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2108 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2109 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2110 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2111 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2112 return result;
2113 } else if (isLazy()) {
2114 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2115 } else {
2116 throw DataException("Error - Data encapsulates an unknown type.");
2117 }
2118 }
2119
2120 /**
2121 \brief
2122 Compute a tensor operation with two Data objects
2123 \param arg_0 - Input - Data object
2124 \param arg_1 - Input - Data object
2125 \param operation - Input - Binary op functor
2126 */
2127 template <typename BinaryFunction>
2128 inline
2129 Data
2130 C_TensorBinaryOperation(Data const &arg_0,
2131 Data const &arg_1,
2132 BinaryFunction operation)
2133 {
2134 if (arg_0.isEmpty() || arg_1.isEmpty())
2135 {
2136 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2137 }
2138 if (arg_0.isLazy() || arg_1.isLazy())
2139 {
2140 throw DataException("Error - Operations not permitted on lazy data.");
2141 }
2142 // Interpolate if necessary and find an appropriate function space
2143 Data arg_0_Z, arg_1_Z;
2144 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2145 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2146 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2147 arg_1_Z = Data(arg_1);
2148 }
2149 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2150 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2151 arg_0_Z =Data(arg_0);
2152 }
2153 else {
2154 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2155 }
2156 } else {
2157 arg_0_Z = Data(arg_0);
2158 arg_1_Z = Data(arg_1);
2159 }
2160 // Get rank and shape of inputs
2161 int rank0 = arg_0_Z.getDataPointRank();
2162 int rank1 = arg_1_Z.getDataPointRank();
2163 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2164 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2165 int size0 = arg_0_Z.getDataPointSize();
2166 int size1 = arg_1_Z.getDataPointSize();
2167 // Declare output Data object
2168 Data res;
2169
2170 if (shape0 == shape1) {
2171 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2172 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2173 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2174 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2175 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2176
2177 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2178 }
2179 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2180
2181 // Prepare the DataConstant input
2182 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2183
2184 // Borrow DataTagged input from Data object
2185 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2186
2187 // Prepare a DataTagged output 2
2188 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2189 res.tag();
2190 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2191
2192 // Prepare offset into DataConstant
2193 int offset_0 = tmp_0->getPointOffset(0,0);
2194 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2195
2196 // Get the pointers to the actual data
2197 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2198 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2199
2200 // Compute a result for the default
2201 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2202 // Compute a result for each tag
2203 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2204 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2205 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2206 tmp_2->addTag(i->first);
2207 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2208 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2209
2210 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2211 }
2212
2213 }
2214 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2215 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2216 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2217 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2218 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2219
2220 int sampleNo_1,dataPointNo_1;
2221 int numSamples_1 = arg_1_Z.getNumSamples();
2222 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2223 int offset_0 = tmp_0->getPointOffset(0,0);
2224 res.requireWrite();
2225 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2226 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2227 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2228 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2229 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2230 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2231 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2232 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2233 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2234 }
2235 }
2236
2237 }
2238 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2239 // Borrow DataTagged input from Data object
2240 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2241
2242 // Prepare the DataConstant input
2243 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2244
2245 // Prepare a DataTagged output 2
2246 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2247 res.tag();
2248 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2249
2250 // Prepare offset into DataConstant
2251 int offset_1 = tmp_1->getPointOffset(0,0);
2252
2253 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2254 // Get the pointers to the actual data
2255 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2256 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2257 // Compute a result for the default
2258 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2259 // Compute a result for each tag
2260 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2261 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2262 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2263 tmp_2->addTag(i->first);
2264 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2265 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2266 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2267 }
2268
2269 }
2270 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2271 // Borrow DataTagged input from Data object
2272 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2273
2274 // Borrow DataTagged input from Data object
2275 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2276
2277 // Prepare a DataTagged output 2
2278 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2279 res.tag(); // DataTagged output
2280 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2281
2282 // Get the pointers to the actual data
2283 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2284 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2285 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2286
2287 // Compute a result for the default
2288 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2289 // Merge the tags
2290 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2291 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2292 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2293 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2294 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2295 }
2296 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2297 tmp_2->addTag(i->first);
2298 }
2299 // Compute a result for each tag
2300 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2301 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2302
2303 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2304 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2305 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2306
2307 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2308 }
2309
2310 }
2311 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2312 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2313 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2314 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2315 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2316 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2317
2318 int sampleNo_0,dataPointNo_0;
2319 int numSamples_0 = arg_0_Z.getNumSamples();
2320 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2321 res.requireWrite();
2322 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2323 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2324 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2325 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2326 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2327 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2328 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2329 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2330 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2331 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2332 }
2333 }
2334
2335 }
2336 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2337 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2338 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2339 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2340 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2341
2342 int sampleNo_0,dataPointNo_0;
2343 int numSamples_0 = arg_0_Z.getNumSamples();
2344 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2345 int offset_1 = tmp_1->getPointOffset(0,0);
2346 res.requireWrite();
2347 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2348 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2349 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2350 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2351 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2352
2353 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2354 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2355 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2356
2357
2358 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2359 }
2360 }
2361
2362 }
2363 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2364 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2365 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2366 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2367 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2368 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2369
2370 int sampleNo_0,dataPointNo_0;
2371 int numSamples_0 = arg_0_Z.getNumSamples();
2372 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2373 res.requireWrite();
2374 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2375 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2376 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2377 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2378 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2379 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2380 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2381 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2382 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2383 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2384 }
2385 }
2386
2387 }
2388 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2389 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2390 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2391 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2392 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2393 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2394
2395 int sampleNo_0,dataPointNo_0;
2396 int numSamples_0 = arg_0_Z.getNumSamples();
2397 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2398 res.requireWrite();
2399 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2400 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2401 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2402 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2403 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2404 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2405 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2406 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2407 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2408 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2409 }
2410 }
2411
2412 }
2413 else {
2414 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2415 }
2416
2417 } else if (0 == rank0) {
2418 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2419 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2420 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2421 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2422 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2423 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2424 }
2425 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2426
2427 // Prepare the DataConstant input
2428 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2429
2430 // Borrow DataTagged input from Data object
2431 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2432
2433 // Prepare a DataTagged output 2
2434 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2435 res.tag();
2436 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2437
2438 // Prepare offset into DataConstant
2439 int offset_0 = tmp_0->getPointOffset(0,0);
2440 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2441
2442 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2443 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2444
2445 // Compute a result for the default
2446 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2447 // Compute a result for each tag
2448 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2449 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2450 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2451 tmp_2->addTag(i->first);
2452 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2453 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2454 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2455 }
2456
2457 }
2458 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2459
2460 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2461 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2462 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2463 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2464
2465 int sampleNo_1,dataPointNo_1;
2466 int numSamples_1 = arg_1_Z.getNumSamples();
2467 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2468 int offset_0 = tmp_0->getPointOffset(0,0);
2469 res.requireWrite();
2470 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2471 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2472 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2473 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2474 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2475 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2476 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2477 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2478 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2479
2480 }
2481 }
2482
2483 }
2484 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2485
2486 // Borrow DataTagged input from Data object
2487 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2488
2489 // Prepare the DataConstant input
2490 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2491
2492 // Prepare a DataTagged output 2
2493 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2494 res.tag();
2495 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2496
2497 // Prepare offset into DataConstant
2498 int offset_1 = tmp_1->getPointOffset(0,0);
2499 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2500
2501 // Get the pointers to the actual data
2502 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2503 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2504
2505
2506 // Compute a result for the default
2507 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2508 // Compute a result for each tag
2509 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2510 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2511 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2512 tmp_2->addTag(i->first);
2513 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2514 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2515
2516 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2517 }
2518
2519 }
2520 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2521
2522 // Borrow DataTagged input from Data object
2523 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2524
2525 // Borrow DataTagged input from Data object
2526 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2527
2528 // Prepare a DataTagged output 2
2529 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2530 res.tag(); // DataTagged output
2531 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2532
2533 // Get the pointers to the actual data
2534 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2535 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2536 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2537
2538 // Compute a result for the default
2539 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2540 // Merge the tags
2541 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2542 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2543 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2544 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2545 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2546 }
2547 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2548 tmp_2->addTag(i->first);
2549 }
2550 // Compute a result for each tag
2551 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2552 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2553 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2554 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2555 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2556
2557 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2558 }
2559
2560 }
2561 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2562
2563 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2564 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2565 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2566 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2567 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2568
2569 int sampleNo_0,dataPointNo_0;
2570 int numSamples_0 = arg_0_Z.getNumSamples();
2571 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2572 res.requireWrite();
2573 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2574 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2575 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2576 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2577 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2578 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2579 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2580 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2581 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2582 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2583 }
2584 }
2585
2586 }
2587 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2588 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2589 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2590 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2591 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2592
2593 int sampleNo_0,dataPointNo_0;
2594 int numSamples_0 = arg_0_Z.getNumSamples();
2595 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2596 int offset_1 = tmp_1->getPointOffset(0,0);
2597 res.requireWrite();
2598 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2599 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2600 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2601 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2602 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2603 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2604 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2605 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2606 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2607 }
2608 }
2609
2610
2611 }
2612 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2613
2614 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2615 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2616 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2617 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2618 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2619
2620 int sampleNo_0,dataPointNo_0;
2621 int numSamples_0 = arg_0_Z.getNumSamples();
2622 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2623 res.requireWrite();
2624 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2625 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2626 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2627 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2628 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2629 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2630 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2631 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2632 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2633 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2634 }
2635 }
2636
2637 }
2638 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2639
2640 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2641 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2642 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2643 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2644 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2645
2646 int sampleNo_0,dataPointNo_0;
2647 int numSamples_0 = arg_0_Z.getNumSamples();
2648 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2649 res.requireWrite();
2650 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2651 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2652 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2653 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2654 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2655 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2656 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2657 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2658 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2659 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2660 }
2661 }
2662
2663 }
2664 else {
2665 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2666 }
2667
2668 } else if (0 == rank1) {
2669 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2670 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2671 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2672 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2673 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2674 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2675 }
2676 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2677
2678 // Prepare the DataConstant input
2679 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2680
2681 // Borrow DataTagged input from Data object
2682 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2683
2684 // Prepare a DataTagged output 2
2685 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2686 res.tag();
2687 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2688
2689 // Prepare offset into DataConstant
2690 int offset_0 = tmp_0->getPointOffset(0,0);
2691 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2692
2693 // Get the pointers to the actual data
2694 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2695 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2696
2697 // Compute a result for the default
2698 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2699 // Compute a result for each tag
2700 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2701 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2702 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2703 tmp_2->addTag(i->first);
2704 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2705 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2706 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2707 }
2708 }
2709 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2710
2711 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2712 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2713 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2714 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2715
2716 int sampleNo_1,dataPointNo_1;
2717 int numSamples_1 = arg_1_Z.getNumSamples();
2718 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2719 int offset_0 = tmp_0->getPointOffset(0,0);
2720 res.requireWrite();
2721 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2722 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2723 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2724 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2725 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2726 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2727 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2728 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2729 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2730 }
2731 }
2732
2733 }
2734 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2735
2736 // Borrow DataTagged input from Data object
2737 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2738
2739 // Prepare the DataConstant input
2740 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2741
2742 // Prepare a DataTagged output 2
2743 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2744 res.tag();
2745 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2746
2747 // Prepare offset into DataConstant
2748 int offset_1 = tmp_1->getPointOffset(0,0);
2749 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2750 // Get the pointers to the actual data
2751 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2752 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2753 // Compute a result for the default
2754 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2755 // Compute a result for each tag
2756 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2757 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2758 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2759 tmp_2->addTag(i->first);
2760 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2761 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2762 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2763 }
2764
2765 }
2766 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2767
2768 // Borrow DataTagged input from Data object
2769 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2770
2771 // Borrow DataTagged input from Data object
2772 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2773
2774 // Prepare a DataTagged output 2
2775 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2776 res.tag(); // DataTagged output
2777 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2778
2779 // Get the pointers to the actual data
2780 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2781 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2782 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2783
2784 // Compute a result for the default
2785 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2786 // Merge the tags
2787 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2788 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2789 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2790 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2791 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2792 }
2793 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2794 tmp_2->addTag(i->first);
2795 }
2796 // Compute a result for each tag
2797 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2798 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2799 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2800 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2801 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2802 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2803 }
2804
2805 }
2806 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2807
2808 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2809 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2810 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2811 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2812 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2813
2814 int sampleNo_0,dataPointNo_0;
2815 int numSamples_0 = arg_0_Z.getNumSamples();
2816 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2817 res.requireWrite();
2818 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2819 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2820 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2821 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2822 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2823 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2824 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2825 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2826 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2827 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2828 }
2829 }
2830
2831 }
2832 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2833 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2834 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2835 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2836 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2837
2838 int sampleNo_0,dataPointNo_0;
2839 int numSamples_0 = arg_0_Z.getNumSamples();
2840 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2841 int offset_1 = tmp_1->getPointOffset(0,0);
2842 res.requireWrite();
2843 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2844 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2845 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2846 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2847 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2848 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2849 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2850 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2851 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2852 }
2853 }
2854
2855
2856 }
2857 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2858
2859 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2860 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2861 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2862 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2863 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2864
2865 int sampleNo_0,dataPointNo_0;
2866 int numSamples_0 = arg_0_Z.getNumSamples();
2867 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2868 res.requireWrite();
2869 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2870 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2871 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2872 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2873 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2874 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2875 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2876 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2877 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2878 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2879 }
2880 }
2881
2882 }
2883 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2884
2885 // After finding a common function space above, the two inputs have the same number of samples and data points per sample
2886 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2887 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2888 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2889 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2890
2891 int sampleNo_0,dataPointNo_0;
2892 int numSamples_0 = arg_0_Z.getNumSamples();
2893 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2894 res.requireWrite();
2895 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2896 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2897 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2898 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2899 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2900 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2901 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2902 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2903 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2904 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2905 }
2906 }
2907
2908 }
2909 else {
2910 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2911 }
2912
2913 } else {
2914 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2915 }
2916
2917 return res;
2918 }
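// --- Illustrative usage sketch (editorial addition, not part of Data.h) ---
// A minimal, hedged example of one way C_TensorBinaryOperation might be
// called; "fs" and the std::plus functor are assumptions chosen for
// illustration, not taken from the escript sources.
//
//   #include <functional>
//   // fs: a FunctionSpace obtained from some Domain elsewhere (assumed)
//   escript::Data a(1.0, escript::DataTypes::scalarShape, fs, false);
//   escript::Data b(2.0, escript::DataTypes::scalarShape, fs, false);
//   escript::Data c = escript::C_TensorBinaryOperation(a, b, std::plus<double>());
//   // c holds the pointwise sum a + b; had the FunctionSpaces differed, one
//   // argument would first have been interpolated onto the other's.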
2919
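/**
   \brief
   Compute a pointwise tensor operation on a single Data object
   \param arg_0 - Input - Data object
   \param operation - Input - Unary operation functor applied to each data point
*/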
2920 template <typename UnaryFunction>
2921 Data
2922 C_TensorUnaryOperation(Data const &arg_0,
2923 UnaryFunction operation)
2924 {
2925 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2926 {
2927 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2928 }
2929 if (arg_0.isLazy())
2930 {
2931 throw DataException("Error - Operations not permitted on lazy data.");
2932 }
2933 // Take a shallow copy of the argument; with only one input there is nothing to interpolate
2934 Data arg_0_Z = Data(arg_0);
2935
2936 // Get rank and shape of inputs
2937 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2938 int size0 = arg_0_Z.getDataPointSize();
2939
2940 // Declare output Data object
2941 Data res;
2942
2943 if (arg_0_Z.isConstant()) {
2944 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2945 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2946 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2947 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2948 }
2949 else if (arg_0_Z.isTagged()) {
2950
2951 // Borrow DataTagged input from Data object
2952 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2953
2954 // Prepare a DataTagged output 2
2955 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2956 res.tag();
2957 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2958
2959 // Get the pointers to the actual data
2960 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2961 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2962 // Compute a result for the default
2963 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2964 // Compute a result for each tag
2965 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2966 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2967 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2968 tmp_2->addTag(i->first);
2969 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2970 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2971 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2972 }
2973
2974 }
2975 else if (arg_0_Z.isExpanded()) {
2976
2977 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2978 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2979 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2980
2981 int sampleNo_0,dataPointNo_0;
2982 int numSamples_0 = arg_0_Z.getNumSamples();
2983 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
res.requireWrite();   // match the expanded branches of C_TensorBinaryOperation above
2984 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2985 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2986 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2987 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2988 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2989 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2990 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2991 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2992 }
2993 }
2994 }
2995 else {
2996 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
2997 }
2998
2999 return res;
3000 }
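// --- Illustrative usage sketch (editorial addition, not part of Data.h) ---
// A hedged example of calling C_TensorUnaryOperation with a plain function
// pointer cast to the UnaryDFunPtr typedef declared on Data; "fs" is an
// assumed FunctionSpace, and the escript wrappers themselves may use
// different functors.
//
//   #include <cmath>
//   escript::Data d(0.5, escript::DataTypes::scalarShape, fs, false);
//   escript::Data s =
//       escript::C_TensorUnaryOperation(d, (escript::Data::UnaryDFunPtr)::sin);
//   // s applies sin to every value of every data point; the storage type
//   // (constant, tagged or expanded) of the input is preserved in the result.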
3001
3002 }
3003 #endif

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision
