/[escript]/branches/numpy/escript/src/Data.h
ViewVC logotype

Contents of /branches/numpy/escript/src/Data.h

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2453 - (show annotations)
Tue Jun 2 22:54:20 2009 UTC (9 years, 10 months ago) by jfenwick
File MIME type: text/plain
File size: 92766 byte(s)
Removing NONUMARRAY defines

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43
44 #include "BufferGroup.h"
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" is different from the
101 function space of inData the inData are tried to be interpolated to what,
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::ValueType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data from a DataArrayView shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be converted into
149 a python numarray.
150
151 \param value - Input - Input data.
152 \param what - Input - A description of what this data represents.
153 \param expanded - Input - Flag, if true fill the entire container with
154 the value. Otherwise a more efficient storage
155 mechanism will be used.
156 */
157 ESCRIPT_DLL_API
158 Data(const boost::python::object& value,
159 const FunctionSpace& what=FunctionSpace(),
160 bool expanded=false);
161
162 /**
163 \brief
164 Constructor which creates a DataConstant.
165 Copies data from any object that can be converted
166 into a numarray. All other parameters are copied from other.
167
168 \param value - Input - Input data.
169 \param other - Input - contains all other parameters.
170 */
171 ESCRIPT_DLL_API
172 Data(const boost::python::object& value,
173 const Data& other);
174
175 /**
176 \brief
177 Constructor which creates a DataConstant of "shape" with constant value.
178 */
179 ESCRIPT_DLL_API
180 Data(double value,
181 const boost::python::tuple& shape=boost::python::make_tuple(),
182 const FunctionSpace& what=FunctionSpace(),
183 bool expanded=false);
184
185
186
187 /**
188 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
189 Once you have passed the pointer, do not delete it.
190 */
191 ESCRIPT_DLL_API
192 explicit Data(DataAbstract* underlyingdata);
193
194 /**
195 \brief Create a Data based on the supplied DataAbstract
196 */
197 ESCRIPT_DLL_API
198 explicit Data(DataAbstract_ptr underlyingdata);
199
200 /**
201 \brief
202 Destructor
203 */
204 ESCRIPT_DLL_API
205 ~Data();
206
207 /**
208 \brief Make this object a deep copy of "other".
209 */
210 ESCRIPT_DLL_API
211 void
212 copy(const Data& other);
213
214 /**
215 \brief Return a pointer to a deep copy of this object.
216 */
217 ESCRIPT_DLL_API
218 Data
219 copySelf();
220
221
222 /**
223 \brief produce a delayed evaluation version of this Data.
224 */
225 ESCRIPT_DLL_API
226 Data
227 delay();
228
229 /**
230 \brief convert the current data into lazy data.
231 */
232 ESCRIPT_DLL_API
233 void
234 delaySelf();
235
236
237 /**
238 Member access methods.
239 */
240
241 /**
242 \brief
243 switches on update protection
244
245 */
246 ESCRIPT_DLL_API
247 void
248 setProtection();
249
250 /**
251 \brief
252 Returns true if the data object is protected against update
253
254 */
255 ESCRIPT_DLL_API
256 bool
257 isProtected() const;
258
259
260 /**
261 \brief
262 Return the value of a data point as a python tuple.
263 */
264 ESCRIPT_DLL_API
265 const boost::python::object
266 getValueOfDataPointAsTuple(int dataPointNo);
267
268 /**
269 \brief
270 sets the values of a data-point from a python object on this process
271 */
272 ESCRIPT_DLL_API
273 void
274 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
275
276 /**
277 \brief
278 sets the values of a data-point from a numarray object on this process
279 */
280 ESCRIPT_DLL_API
281 void
282 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
283
284 /**
285 \brief
286 sets the values of a data-point on this process
287 */
288 ESCRIPT_DLL_API
289 void
290 setValueOfDataPoint(int dataPointNo, const double);
291
292 /**
293 \brief Return a data point across all processors as a python tuple.
294 */
295 ESCRIPT_DLL_API
296 const boost::python::object
297 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
298
299 /**
300 \brief
301 Return the tag number associated with the given data-point.
302
303 */
304 ESCRIPT_DLL_API
305 int
306 getTagNumber(int dpno);
307
308 /**
309 \brief
310 Return the C wrapper for the Data object.
311 */
312 ESCRIPT_DLL_API
313 escriptDataC
314 getDataC();
315
316
317
318 /**
319 \brief
320 Return the C wrapper for the Data object - const version.
321 */
322 ESCRIPT_DLL_API
323 escriptDataC
324 getDataC() const;
325
326 /**
327 \brief How much space is required to evaluate a sample of the Data.
328 */
329 ESCRIPT_DLL_API
330 size_t
331 getSampleBufferSize() const;
332
333
334
335 /**
336 \brief
337 Write the data as a string. For large amounts of data, a summary is printed.
338 */
339 ESCRIPT_DLL_API
340 std::string
341 toString() const;
342
343 /**
344 \brief
345 Whatever the current Data type make this into a DataExpanded.
346 */
347 ESCRIPT_DLL_API
348 void
349 expand();
350
351 /**
352 \brief
353 If possible convert this Data to DataTagged. This will only allow
354 Constant data to be converted to tagged. An attempt to convert
355 Expanded data to tagged will throw an exception.
356 */
357 ESCRIPT_DLL_API
358 void
359 tag();
360
361 /**
362 \brief If this data is lazy, then convert it to ready data.
363 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
364 */
365 ESCRIPT_DLL_API
366 void
367 resolve();
368
369
370 /**
371 \brief Ensures data is ready for write access.
372 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
373 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
374 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
375 Doing so might introduce additional sharing.
376 */
377 ESCRIPT_DLL_API
378 void
379 requireWrite();
380
381 /**
382 \brief
383 Return true if this Data is expanded.
384 \note To determine if a sample will contain separate values for each datapoint. Use actsExpanded instead.
385 */
386 ESCRIPT_DLL_API
387 bool
388 isExpanded() const;
389
390 /**
391 \brief
392 Return true if this Data is expanded or resolves to expanded.
393 That is, if it has a separate value for each datapoint in the sample.
394 */
395 ESCRIPT_DLL_API
396 bool
397 actsExpanded() const;
398
399
400 /**
401 \brief
402 Return true if this Data is tagged.
403 */
404 ESCRIPT_DLL_API
405 bool
406 isTagged() const;
407
408 /**
409 \brief
410 Return true if this Data is constant.
411 */
412 ESCRIPT_DLL_API
413 bool
414 isConstant() const;
415
416 /**
417 \brief Return true if this Data is lazy.
418 */
419 ESCRIPT_DLL_API
420 bool
421 isLazy() const;
422
423 /**
424 \brief Return true if this data is ready.
425 */
426 ESCRIPT_DLL_API
427 bool
428 isReady() const;
429
430 /**
431 \brief
432 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
433 contains datapoints.
434 */
435 ESCRIPT_DLL_API
436 bool
437 isEmpty() const;
438
439 /**
440 \brief
441 Return the function space.
442 */
443 ESCRIPT_DLL_API
444 inline
445 const FunctionSpace&
446 getFunctionSpace() const
447 {
448 return m_data->getFunctionSpace();
449 }
450
451 /**
452 \brief
453 Return a copy of the function space.
454 */
455 ESCRIPT_DLL_API
456 const FunctionSpace
457 getCopyOfFunctionSpace() const;
458
459 /**
460 \brief
461 Return the domain.
462 */
463 ESCRIPT_DLL_API
464 inline
465 // const AbstractDomain&
466 const_Domain_ptr
467 getDomain() const
468 {
469 return getFunctionSpace().getDomain();
470 }
471
472
473 /**
474 \brief
475 Return the domain.
476 TODO: For internal use only. This should be removed.
477 */
478 ESCRIPT_DLL_API
479 inline
480 // const AbstractDomain&
481 Domain_ptr
482 getDomainPython() const
483 {
484 return getFunctionSpace().getDomainPython();
485 }
486
487 /**
488 \brief
489 Return a copy of the domain.
490 */
491 ESCRIPT_DLL_API
492 const AbstractDomain
493 getCopyOfDomain() const;
494
495 /**
496 \brief
497 Return the rank of the point data.
498 */
499 ESCRIPT_DLL_API
500 inline
501 unsigned int
502 getDataPointRank() const
503 {
504 return m_data->getRank();
505 }
506
507 /**
508 \brief
509 Return the number of data points
510 */
511 ESCRIPT_DLL_API
512 inline
513 int
514 getNumDataPoints() const
515 {
516 return getNumSamples() * getNumDataPointsPerSample();
517 }
518 /**
519 \brief
520 Return the number of samples.
521 */
522 ESCRIPT_DLL_API
523 inline
524 int
525 getNumSamples() const
526 {
527 return m_data->getNumSamples();
528 }
529
530 /**
531 \brief
532 Return the number of data points per sample.
533 */
534 ESCRIPT_DLL_API
535 inline
536 int
537 getNumDataPointsPerSample() const
538 {
539 return m_data->getNumDPPSample();
540 }
541
542
543 /**
544 \brief
545 Return the number of values in the shape for this object.
546 */
547 ESCRIPT_DLL_API
548 int
549 getNoValues() const
550 {
551 return m_data->getNoValues();
552 }
553
554
555 /**
556 \brief
557 dumps the object into a netCDF file
558 */
559 ESCRIPT_DLL_API
560 void
561 dump(const std::string fileName) const;
562
563 // /**
564 // \brief
565 // Return the sample data for the given sample no. This is not the
566 // preferred interface but is provided for use by C code.
567 // The buffer parameter is only required for LazyData.
568 // \param sampleNo - Input - the given sample no.
569 // \param buffer - Vector to compute (and store) sample data in.
570 // \return pointer to the sample data.
571 //*/
572 // ESCRIPT_DLL_API
573 // inline
574 // const DataAbstract::ValueType::value_type*
575 // getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer=0);
576
577
578 /**
579 \brief
580 Return the sample data for the given sample no. This is not the
581 preferred interface but is provided for use by C code.
582 The bufferg parameter is only required for LazyData.
583 \param sampleNo - Input - the given sample no.
584 \param bufferg - A buffer in which to compute (and store) sample data will be selected from this group.
585 \return pointer to the sample data.
586 */
587 ESCRIPT_DLL_API
588 inline
589 const DataAbstract::ValueType::value_type*
590 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);
591
592
593 /**
594 \brief
595 Return the sample data for the given sample no. This is not the
596 preferred interface but is provided for use by C code.
597 \param sampleNo - Input - the given sample no.
598 \return pointer to the sample data.
599 */
600 ESCRIPT_DLL_API
601 inline
602 DataAbstract::ValueType::value_type*
603 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
604
605
606 /**
607 \brief
608 Return the sample data for the given tag. If an attempt is made to
609 access data that isn't tagged an exception will be thrown.
610 \param tag - Input - the tag key.
611 */
612 ESCRIPT_DLL_API
613 inline
614 DataAbstract::ValueType::value_type*
615 getSampleDataByTag(int tag)
616 {
617 return m_data->getSampleDataByTag(tag);
618 }
619
620 /**
621 \brief
622 Return a view into the data for the data point specified.
623 NOTE: Construction of the DataArrayView is a relatively expensive
624 operation.
625 \param sampleNo - Input -
626 \param dataPointNo - Input -
627 */
628 ESCRIPT_DLL_API
629 DataTypes::ValueType::const_reference
630 getDataPointRO(int sampleNo, int dataPointNo);
631
632
633 ESCRIPT_DLL_API
634 DataTypes::ValueType::reference
635 getDataPointRW(int sampleNo, int dataPointNo);
636
637
638
639 /**
640 \brief
641 Return the offset for the given sample and point within the sample
642 */
643 ESCRIPT_DLL_API
644 inline
645 DataTypes::ValueType::size_type
646 getDataOffset(int sampleNo,
647 int dataPointNo)
648 {
649 return m_data->getPointOffset(sampleNo,dataPointNo);
650 }
651
652 /**
653 \brief
654 Return a reference to the data point shape.
655 */
656 ESCRIPT_DLL_API
657 inline
658 const DataTypes::ShapeType&
659 getDataPointShape() const
660 {
661 return m_data->getShape();
662 }
663
664 /**
665 \brief
666 Return the data point shape as a tuple of integers.
667 */
668 ESCRIPT_DLL_API
669 const boost::python::tuple
670 getShapeTuple() const;
671
672 /**
673 \brief
674 Return the size of the data point. It is the product of the
675 data point shape dimensions.
676 */
677 ESCRIPT_DLL_API
678 int
679 getDataPointSize() const;
680
681 /**
682 \brief
683 Return the number of doubles stored for this Data.
684 */
685 ESCRIPT_DLL_API
686 DataTypes::ValueType::size_type
687 getLength() const;
688
689
690
691 /**
692 \brief
693 Assign the given value to the tag associated with name. Implicitly converts this
694 object to type DataTagged. Throws an exception if this object
695 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
696 \param tagKey - Input - Integer key.
697 \param value - Input - Value to associate with given key.
698 ==>*
699 */
700 ESCRIPT_DLL_API
701 void
702 setTaggedValueByName(std::string name,
703 const boost::python::object& value);
704
705 /**
706 \brief
707 Assign the given value to the tag. Implicitly converts this
708 object to type DataTagged if it is constant.
709
710 \param tagKey - Input - Integer key.
711 \param value - Input - Value to associate with given key.
712 ==>*
713 */
714 ESCRIPT_DLL_API
715 void
716 setTaggedValue(int tagKey,
717 const boost::python::object& value);
718
719 /**
720 \brief
721 Assign the given value to the tag. Implicitly converts this
722 object to type DataTagged if it is constant.
723
724 \param tagKey - Input - Integer key.
725 \param pointshape - Input - The shape of the value parameter
726 \param value - Input - Value to associate with given key.
727 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
728 */
729 ESCRIPT_DLL_API
730 void
731 setTaggedValueFromCPP(int tagKey,
732 const DataTypes::ShapeType& pointshape,
733 const DataTypes::ValueType& value,
734 int dataOffset=0);
735
736
737
738 /**
739 \brief
740 Copy other Data object into this Data object where mask is positive.
741 */
742 ESCRIPT_DLL_API
743 void
744 copyWithMask(const Data& other,
745 const Data& mask);
746
747 /**
748 Data object operation methods and operators.
749 */
750
751 /**
752 \brief
753 set all values to zero
754 *
755 */
756 ESCRIPT_DLL_API
757 void
758 setToZero();
759
760 /**
761 \brief
762 Interpolates this onto the given functionspace and returns
763 the result as a Data object.
764 *
765 */
766 ESCRIPT_DLL_API
767 Data
768 interpolate(const FunctionSpace& functionspace) const;
769 /**
770 \brief
771 Calculates the gradient of the data at the data points of functionspace.
772 If functionspace is not present the function space of Function(getDomain()) is used.
773 *
774 */
775 ESCRIPT_DLL_API
776 Data
777 gradOn(const FunctionSpace& functionspace) const;
778
779 ESCRIPT_DLL_API
780 Data
781 grad() const;
782
783 /**
784 \brief
785 Calculate the integral over the function space domain as a python tuple.
786 */
787 ESCRIPT_DLL_API
788 boost::python::object
789 integrateToTuple_const() const;
790
791
792 /**
793 \brief
794 Calculate the integral over the function space domain as a python tuple.
795 */
796 ESCRIPT_DLL_API
797 boost::python::object
798 integrateToTuple();
799
800
801
802 /**
803 \brief
804 Returns 1./ Data object
805 *
806 */
807 ESCRIPT_DLL_API
808 Data
809 oneOver() const;
810 /**
811 \brief
812 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
813 *
814 */
815 ESCRIPT_DLL_API
816 Data
817 wherePositive() const;
818
819 /**
820 \brief
821 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
822 *
823 */
824 ESCRIPT_DLL_API
825 Data
826 whereNegative() const;
827
828 /**
829 \brief
830 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
831 *
832 */
833 ESCRIPT_DLL_API
834 Data
835 whereNonNegative() const;
836
837 /**
838 \brief
839 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
840 *
841 */
842 ESCRIPT_DLL_API
843 Data
844 whereNonPositive() const;
845
846 /**
847 \brief
848 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
849 *
850 */
851 ESCRIPT_DLL_API
852 Data
853 whereZero(double tol=0.0) const;
854
855 /**
856 \brief
857 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
858 *
859 */
860 ESCRIPT_DLL_API
861 Data
862 whereNonZero(double tol=0.0) const;
863
864 /**
865 \brief
866 Return the maximum absolute value of this Data object.
867
868 The method is not const because lazy data needs to be expanded before Lsup can be computed.
869 The _const form can be used when the Data object is const, however this will only work for
870 Data which is not Lazy.
871
872 For Data which contain no samples (or tagged Data for which no tags in use have a value)
873 zero is returned.
874 */
875 ESCRIPT_DLL_API
876 double
877 Lsup();
878
879 ESCRIPT_DLL_API
880 double
881 Lsup_const() const;
882
883
884 /**
885 \brief
886 Return the maximum value of this Data object.
887
888 The method is not const because lazy data needs to be expanded before sup can be computed.
889 The _const form can be used when the Data object is const, however this will only work for
890 Data which is not Lazy.
891
892 For Data which contain no samples (or tagged Data for which no tags in use have a value)
893 a large negative value is returned.
894 */
895 ESCRIPT_DLL_API
896 double
897 sup();
898
899 ESCRIPT_DLL_API
900 double
901 sup_const() const;
902
903
904 /**
905 \brief
906 Return the minimum value of this Data object.
907
908 The method is not const because lazy data needs to be expanded before inf can be computed.
909 The _const form can be used when the Data object is const, however this will only work for
910 Data which is not Lazy.
911
912 For Data which contain no samples (or tagged Data for which no tags in use have a value)
913 a large positive value is returned.
914 */
915 ESCRIPT_DLL_API
916 double
917 inf();
918
919 ESCRIPT_DLL_API
920 double
921 inf_const() const;
922
923
924
925 /**
926 \brief
927 Return the absolute value of each data point of this Data object.
928 *
929 */
930 ESCRIPT_DLL_API
931 Data
932 abs() const;
933
934 /**
935 \brief
936 Return the maximum value of each data point of this Data object.
937 *
938 */
939 ESCRIPT_DLL_API
940 Data
941 maxval() const;
942
943 /**
944 \brief
945 Return the minimum value of each data point of this Data object.
946 *
947 */
948 ESCRIPT_DLL_API
949 Data
950 minval() const;
951
952 /**
953 \brief
954 Return the (sample number, data-point number) of the data point with
955 the minimum value in this Data object.
956 */
957 ESCRIPT_DLL_API
958 const boost::python::tuple
959 minGlobalDataPoint() const;
960
961 ESCRIPT_DLL_API
962 void
963 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
964 /**
965 \brief
966 Return the sign of each data point of this Data object.
967 -1 for negative values, zero for zero values, 1 for positive values.
968 *
969 */
970 ESCRIPT_DLL_API
971 Data
972 sign() const;
973
974 /**
975 \brief
976 Return the symmetric part of a matrix which is half the matrix plus its transpose.
977 *
978 */
979 ESCRIPT_DLL_API
980 Data
981 symmetric() const;
982
983 /**
984 \brief
985 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
986 *
987 */
988 ESCRIPT_DLL_API
989 Data
990 nonsymmetric() const;
991
992 /**
993 \brief
994 Return the trace of a matrix
995 *
996 */
997 ESCRIPT_DLL_API
998 Data
999 trace(int axis_offset) const;
1000
1001 /**
1002 \brief
1003 Transpose each data point of this Data object around the given axis.
1004 *
1005 */
1006 ESCRIPT_DLL_API
1007 Data
1008 transpose(int axis_offset) const;
1009
1010 /**
1011 \brief
1012 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing values.
1013 Currently this function is restricted to rank 2, square shape, and dimension 3.
1014 *
1015 */
1016 ESCRIPT_DLL_API
1017 Data
1018 eigenvalues() const;
1019
1020 /**
1021 \brief
1022 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1023 The eigenvalues are ordered in increasing size where eigenvalues with relative difference less than
1024 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1025 first non-zero entry is positive.
1026 Currently this function is restricted to rank 2, square shape, and dimension 3
1027 *
1028 */
1029 ESCRIPT_DLL_API
1030 const boost::python::tuple
1031 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1032
1033 /**
1034 \brief
1035 swaps the components axis0 and axis1
1036 *
1037 */
1038 ESCRIPT_DLL_API
1039 Data
1040 swapaxes(const int axis0, const int axis1) const;
1041
1042 /**
1043 \brief
1044 Return the error function erf of each data point of this Data object.
1045 *
1046 */
1047 ESCRIPT_DLL_API
1048 Data
1049 erf() const;
1050
1051 /**
1052 \brief
1053 Return the sin of each data point of this Data object.
1054 *
1055 */
1056 ESCRIPT_DLL_API
1057 Data
1058 sin() const;
1059
1060 /**
1061 \brief
1062 Return the cos of each data point of this Data object.
1063 *
1064 */
1065 ESCRIPT_DLL_API
1066 Data
1067 cos() const;
1068
1069 /**
1070 \brief
1071 Return the tan of each data point of this Data object.
1072 *
1073 */
1074 ESCRIPT_DLL_API
1075 Data
1076 tan() const;
1077
1078 /**
1079 \brief
1080 Return the asin of each data point of this Data object.
1081 *
1082 */
1083 ESCRIPT_DLL_API
1084 Data
1085 asin() const;
1086
1087 /**
1088 \brief
1089 Return the acos of each data point of this Data object.
1090 *
1091 */
1092 ESCRIPT_DLL_API
1093 Data
1094 acos() const;
1095
1096 /**
1097 \brief
1098 Return the atan of each data point of this Data object.
1099 *
1100 */
1101 ESCRIPT_DLL_API
1102 Data
1103 atan() const;
1104
1105 /**
1106 \brief
1107 Return the sinh of each data point of this Data object.
1108 *
1109 */
1110 ESCRIPT_DLL_API
1111 Data
1112 sinh() const;
1113
1114 /**
1115 \brief
1116 Return the cosh of each data point of this Data object.
1117 *
1118 */
1119 ESCRIPT_DLL_API
1120 Data
1121 cosh() const;
1122
1123 /**
1124 \brief
1125 Return the tanh of each data point of this Data object.
1126 *
1127 */
1128 ESCRIPT_DLL_API
1129 Data
1130 tanh() const;
1131
1132 /**
1133 \brief
1134 Return the asinh of each data point of this Data object.
1135 *
1136 */
1137 ESCRIPT_DLL_API
1138 Data
1139 asinh() const;
1140
1141 /**
1142 \brief
1143 Return the acosh of each data point of this Data object.
1144 *
1145 */
1146 ESCRIPT_DLL_API
1147 Data
1148 acosh() const;
1149
1150 /**
1151 \brief
1152 Return the atanh of each data point of this Data object.
1153 *
1154 */
1155 ESCRIPT_DLL_API
1156 Data
1157 atanh() const;
1158
1159 /**
1160 \brief
1161 Return the log to base 10 of each data point of this Data object.
1162 *
1163 */
1164 ESCRIPT_DLL_API
1165 Data
1166 log10() const;
1167
1168 /**
1169 \brief
1170 Return the natural log of each data point of this Data object.
1171 *
1172 */
1173 ESCRIPT_DLL_API
1174 Data
1175 log() const;
1176
1177 /**
1178 \brief
1179 Return the exponential function of each data point of this Data object.
1180 *
1181 */
1182 ESCRIPT_DLL_API
1183 Data
1184 exp() const;
1185
1186 /**
1187 \brief
1188 Return the square root of each data point of this Data object.
1189 *
1190 */
1191 ESCRIPT_DLL_API
1192 Data
1193 sqrt() const;
1194
1195 /**
1196 \brief
1197 Return the negation of each data point of this Data object.
1198 *
1199 */
1200 ESCRIPT_DLL_API
1201 Data
1202 neg() const;
1203
1204 /**
1205 \brief
1206 Return the identity of each data point of this Data object.
1207 Simply returns this object unmodified.
1208 *
1209 */
1210 ESCRIPT_DLL_API
1211 Data
1212 pos() const;
1213
1214 /**
1215 \brief
1216 Return the given power of each data point of this Data object.
1217
1218 \param right Input - the power to raise the object to.
1219 *
1220 */
1221 ESCRIPT_DLL_API
1222 Data
1223 powD(const Data& right) const;
1224
1225 /**
1226 \brief
1227 Return the given power of each data point of this boost python object.
1228
1229 \param right Input - the power to raise the object to.
1230 *
1231 */
1232 ESCRIPT_DLL_API
1233 Data
1234 powO(const boost::python::object& right) const;
1235
1236 /**
1237 \brief
1238 Return the given power of each data point of this boost python object.
1239
1240 \param left Input - the bases
1241 *
1242 */
1243
1244 ESCRIPT_DLL_API
1245 Data
1246 rpowO(const boost::python::object& left) const;
1247
1248 /**
1249 \brief
1250 writes the object to a file in the DX file format
1251 */
1252 ESCRIPT_DLL_API
1253 void
1254 saveDX(std::string fileName) const;
1255
1256 /**
1257 \brief
1258 writes the object to a file in the VTK file format
1259 */
1260 ESCRIPT_DLL_API
1261 void
1262 saveVTK(std::string fileName) const;
1263
1264 /**
1265 \brief
1266 Overloaded operator +=
1267 \param right - Input - The right hand side.
1268 *
1269 */
1270 ESCRIPT_DLL_API
1271 Data& operator+=(const Data& right);
1272 ESCRIPT_DLL_API
1273 Data& operator+=(const boost::python::object& right);
1274
1275 ESCRIPT_DLL_API
1276 Data& operator=(const Data& other);
1277
1278 /**
1279 \brief
1280 Overloaded operator -=
1281 \param right - Input - The right hand side.
1282 *
1283 */
1284 ESCRIPT_DLL_API
1285 Data& operator-=(const Data& right);
1286 ESCRIPT_DLL_API
1287 Data& operator-=(const boost::python::object& right);
1288
1289 /**
1290 \brief
1291 Overloaded operator *=
1292 \param right - Input - The right hand side.
1293 *
1294 */
1295 ESCRIPT_DLL_API
1296 Data& operator*=(const Data& right);
1297 ESCRIPT_DLL_API
1298 Data& operator*=(const boost::python::object& right);
1299
1300 /**
1301 \brief
1302 Overloaded operator /=
1303 \param right - Input - The right hand side.
1304 *
1305 */
1306 ESCRIPT_DLL_API
1307 Data& operator/=(const Data& right);
1308 ESCRIPT_DLL_API
1309 Data& operator/=(const boost::python::object& right);
1310
1311 /**
1312 \brief
1313 Returns true if this can be interpolated to functionspace.
1314 */
1315 ESCRIPT_DLL_API
1316 bool
1317 probeInterpolation(const FunctionSpace& functionspace) const;
1318
1319 /**
1320 Data object slicing methods.
1321 */
1322
1323 /**
1324 \brief
1325 Returns a slice from this Data object.
1326
1327 Description:
1328 Implements the [] get operator in python.
1329 Calls getSlice.
1330
1331 \param key - Input - python slice tuple specifying
1332 slice to return.
1333 */
1334 ESCRIPT_DLL_API
1335 Data
1336 getItem(const boost::python::object& key) const;
1337
1338 /**
1339 \brief
1340 Copies slice from value into this Data object.
1341
1342 Implements the [] set operator in python.
1343 Calls setSlice.
1344
1345 \param key - Input - python slice tuple specifying
1346 slice to copy from value.
1347 \param value - Input - Data object to copy from.
1348 */
1349 ESCRIPT_DLL_API
1350 void
1351 setItemD(const boost::python::object& key,
1352 const Data& value);
1353
1354 ESCRIPT_DLL_API
1355 void
1356 setItemO(const boost::python::object& key,
1357 const boost::python::object& value);
1358
1359 // These following public methods should be treated as private.
1360
1361 /**
1362 \brief
1363 Perform the given unary operation on every element of every data point in
1364 this Data object.
1365 */
1366 template <class UnaryFunction>
1367 ESCRIPT_DLL_API
1368 inline
1369 void
1370 unaryOp2(UnaryFunction operation);
1371
1372 /**
1373 \brief
1374 Return a Data object containing the specified slice of
1375 this Data object.
1376 \param region - Input - Region to copy.
1377 *
1378 */
1379 ESCRIPT_DLL_API
1380 Data
1381 getSlice(const DataTypes::RegionType& region) const;
1382
1383 /**
1384 \brief
1385 Copy the specified slice from the given value into this
1386 Data object.
1387 \param value - Input - Data to copy from.
1388 \param region - Input - Region to copy.
1389 *
1390 */
1391 ESCRIPT_DLL_API
1392 void
1393 setSlice(const Data& value,
1394 const DataTypes::RegionType& region);
1395
1396 /**
1397 \brief
1398 print the data values to stdout. Used for debugging
1399 */
1400 ESCRIPT_DLL_API
1401 void
1402 print(void);
1403
1404 /**
1405 \brief
1406 return the MPI rank number of the local data
1407 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1408 is returned
1409 */
1410 ESCRIPT_DLL_API
1411 int
1412 get_MPIRank(void) const;
1413
1414 /**
1415 \brief
1416 return the number of MPI processes for the local data
1417 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1418 is returned
1419 */
1420 ESCRIPT_DLL_API
1421 int
1422 get_MPISize(void) const;
1423
1424 /**
1425 \brief
1426 return the MPI communicator of the local data
1427 MPI_COMM_WORLD is assumed and returned.
1428 */
1429 ESCRIPT_DLL_API
1430 MPI_Comm
1431 get_MPIComm(void) const;
1432
1433 /**
1434 \brief
1435 return the object produced by the factory, which is a DataConstant or DataExpanded
1436 TODO Ownership of this object should be explained in the documentation.
1437 */
1438 ESCRIPT_DLL_API
1439 DataAbstract*
1440 borrowData(void) const;
1441
1442 ESCRIPT_DLL_API
1443 DataAbstract_ptr
1444 borrowDataPtr(void) const;
1445
1446 ESCRIPT_DLL_API
1447 DataReady_ptr
1448 borrowReadyPtr(void) const;
1449
1450
1451
1452 /**
1453 \brief
1454 Return a pointer to the beginning of the datapoint at the specified offset.
1455 TODO Eventually these should be inlined.
1456 \param i - position(offset) in the underlying datastructure
1457 */
1458
1459 ESCRIPT_DLL_API
1460 DataTypes::ValueType::const_reference
1461 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1462
1463
1464 ESCRIPT_DLL_API
1465 DataTypes::ValueType::reference
1466 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1467
1468
1469
1470 /**
1471 \brief Create a buffer for use by getSample
1472 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1473 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1474
1475 In multi-threaded sections, this needs to be called on each thread.
1476
1477 \return A BufferGroup* if Data is lazy, NULL otherwise.
1478 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1479 */
1480 ESCRIPT_DLL_API
1481 BufferGroup*
1482 allocSampleBuffer() const;
1483
1484 /**
1485 \brief Free a buffer allocated with allocSampleBuffer.
1486 \param buffer Input - pointer to the buffer to deallocate.
1487 */
1488 ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);
1489
1490 protected:
1491
1492 private:
1493
1494 double
1495 LsupWorker() const;
1496
1497 double
1498 supWorker() const;
1499
1500 double
1501 infWorker() const;
1502
1503 boost::python::object
1504 integrateWorker() const;
1505
1506 /**
1507 \brief
1508 Check *this and the right operand are compatible. Throws
1509 an exception if they aren't.
1510 \param right - Input - The right hand side.
1511 */
1512 inline
1513 void
1514 operandCheck(const Data& right) const
1515 {
1516 return m_data->operandCheck(*(right.m_data.get()));
1517 }
1518
1519 /**
1520 \brief
1521 Perform the specified reduction algorithm on every element of every data point in
1522 this Data object according to the given function and return the single value result.
1523 */
1524 template <class BinaryFunction>
1525 inline
1526 double
1527 algorithm(BinaryFunction operation,
1528 double initial_value) const;
1529
1530 /**
1531 \brief
1532 Reduce each data-point in this Data object using the given operation. Return a Data
1533 object with the same number of data-points, but with each data-point containing only
1534 one value - the result of the reduction operation on the corresponding data-point in
1535 this Data object
1536 */
1537 template <class BinaryFunction>
1538 inline
1539 Data
1540 dp_algorithm(BinaryFunction operation,
1541 double initial_value) const;
1542
1543 /**
1544 \brief
1545 Perform the given binary operation on all of the data's elements.
1546 The underlying type of the right hand side (right) determines the final
1547 type of *this after the operation. For example if the right hand side
1548 is expanded *this will be expanded if necessary.
1549 RHS is a Data object.
1550 */
1551 template <class BinaryFunction>
1552 inline
1553 void
1554 binaryOp(const Data& right,
1555 BinaryFunction operation);
1556
1557 /**
1558 \brief
1559 Convert the data type of the RHS to match this.
1560 \param right - Input - data type to match.
1561 */
1562 void
1563 typeMatchLeft(Data& right) const;
1564
1565 /**
1566 \brief
1567 Convert the data type of this to match the RHS.
1568 \param right - Input - data type to match.
1569 */
1570 void
1571 typeMatchRight(const Data& right);
1572
1573 /**
1574 \brief
1575 Construct a Data object of the appropriate type.
1576 */
1577
1578 void
1579 initialise(const DataTypes::ValueType& value,
1580 const DataTypes::ShapeType& shape,
1581 const FunctionSpace& what,
1582 bool expanded);
1583
1584 void
1585 initialise(const WrappedArray& value,
1586 const FunctionSpace& what,
1587 bool expanded);
1588
1589 //
1590 // flag to protect the data object against any update
1591 bool m_protected;
1592 mutable bool m_shared;
1593 bool m_lazy;
1594
1595 //
1596 // pointer to the actual data object
1597 // boost::shared_ptr<DataAbstract> m_data;
1598 DataAbstract_ptr m_data;
1599
1600 // If possible please use getReadyPtr instead.
1601 // But see warning below.
1602 const DataReady*
1603 getReady() const;
1604
1605 DataReady*
1606 getReady();
1607
1608
1609 // Be wary of using this for local operations since it (temporarily) increases reference count.
1610 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1611 // getReady() instead
1612 DataReady_ptr
1613 getReadyPtr();
1614
1615 const_DataReady_ptr
1616 getReadyPtr() const;
1617
1618
1619 /**
1620 \brief Update the Data's shared flag
1621 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1622 For internal use only.
1623 */
1624 void updateShareStatus(bool nowshared) const
1625 {
1626 m_shared=nowshared; // m_shared is mutable
1627 }
1628
1629 // In the isShared() method below:
1630 // A problem would occur if m_data (the address pointed to) were being modified
1631 // while the call m_data->is_shared is being executed.
1632 //
1633 // Q: So why do I think this code can be thread safe/correct?
1634 // A: We need to make some assumptions.
1635 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1636 // 2. We assume that no constructions or assignments which will share previously unshared
1637 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1638 //
1639 // This means that the only transition we need to consider, is when a previously shared object is
1640 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1641 // In those cases the m_shared flag changes to false after m_data has completed changing.
1642 // For any threads executing before the flag switches they will assume the object is still shared.
1643 bool isShared() const
1644 {
1645 return m_shared;
1646 /* if (m_shared) return true;
1647 if (m_data->isShared())
1648 {
1649 updateShareStatus(true);
1650 return true;
1651 }
1652 return false;*/
1653 }
1654
1655 void forceResolve()
1656 {
1657 if (isLazy())
1658 {
1659 #ifdef _OPENMP
1660 if (omp_in_parallel())
1661 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1662 throw DataException("Please do not call forceResolve() in a parallel region.");
1663 }
1664 #endif
1665 resolve();
1666 }
1667 }
1668
1669 /**
1670 \brief if another object is sharing out member data make a copy to work with instead.
1671 This code should only be called from single threaded sections of code.
1672 */
1673 void exclusiveWrite()
1674 {
1675 #ifdef _OPENMP
1676 if (omp_in_parallel())
1677 {
1678 // *((int*)0)=17;
1679 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1680 }
1681 #endif
1682 forceResolve();
1683 if (isShared())
1684 {
1685 DataAbstract* t=m_data->deepCopy();
1686 set_m_data(DataAbstract_ptr(t));
1687 }
1688 }
1689
1690 /**
1691 \brief checks if caller can have exclusive write to the object
1692 */
1693 void checkExclusiveWrite()
1694 {
1695 if (isLazy() || isShared())
1696 {
1697 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1698 }
1699 }
1700
1701 /**
1702 \brief Modify the data abstract hosted by this Data object
1703 For internal use only.
1704 Passing a pointer to null is permitted (do this in the destructor)
1705 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1706 */
1707 void set_m_data(DataAbstract_ptr p);
1708
1709 friend class DataAbstract; // To allow calls to updateShareStatus
1710
1711 };
1712
1713 } // end namespace escript
1714
1715
1716 // No, this is not supposed to be at the top of the file
1717 // DataAbstact needs to be declared first, then DataReady needs to be fully declared
1718 // so that I can dynamic cast between them below.
1719 #include "DataReady.h"
1720 #include "DataLazy.h"
1721
1722 namespace escript
1723 {
1724
1725 inline
1726 const DataReady*
1727 Data::getReady() const
1728 {
1729 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1730 EsysAssert((dr!=0), "Error - casting to DataReady.");
1731 return dr;
1732 }
1733
1734 inline
1735 DataReady*
1736 Data::getReady()
1737 {
1738 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1739 EsysAssert((dr!=0), "Error - casting to DataReady.");
1740 return dr;
1741 }
1742
1743 // Be wary of using this for local operations since it (temporarily) increases reference count.
1744 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1745 // getReady() instead
1746 inline
1747 DataReady_ptr
1748 Data::getReadyPtr()
1749 {
1750 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1751 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1752 return dr;
1753 }
1754
1755
1756 inline
1757 const_DataReady_ptr
1758 Data::getReadyPtr() const
1759 {
1760 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1761 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1762 return dr;
1763 }
1764
1765 inline
1766 DataAbstract::ValueType::value_type*
1767 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1768 {
1769 // if (isLazy())
1770 // {
1771 // resolve();
1772 // }
1773 // exclusiveWrite();
1774 if (isLazy())
1775 {
1776 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1777 }
1778 return getReady()->getSampleDataRW(sampleNo);
1779 }
1780
1781 // inline
1782 // const DataAbstract::ValueType::value_type*
1783 // Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer)
1784 // {
1785 // DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1786 // if (l!=0)
1787 // {
1788 // size_t offset=0;
1789 // if (buffer==NULL)
1790 // {
1791 // throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1792 // }
1793 // const DataTypes::ValueType* res=l->resolveSample(*buffer,0,sampleNo,offset);
1794 // return &((*res)[offset]);
1795 // }
1796 // return getReady()->getSampleDataRO(sampleNo);
1797 // }
1798
1799 inline
1800 const DataAbstract::ValueType::value_type*
1801 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
1802 {
1803 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1804 if (l!=0)
1805 {
1806 size_t offset=0;
1807 if (bufferg==NULL)
1808 {
1809 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1810 }
1811 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
1812 return &((*res)[offset]);
1813 }
1814 return getReady()->getSampleDataRO(sampleNo);
1815 }
1816
1817
1818
1819 /**
1820 Modify a filename for MPI parallel output to multiple files
1821 */
1822 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1823
1824 /**
1825 Binary Data object operators.
1826 */
1827 inline double rpow(double x,double y)
1828 {
1829 return pow(y,x);
1830 }
1831
1832 /**
1833 \brief
1834 Operator+
1835 Takes two Data objects.
1836 */
1837 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1838
1839 /**
1840 \brief
1841 Operator-
1842 Takes two Data objects.
1843 */
1844 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1845
1846 /**
1847 \brief
1848 Operator*
1849 Takes two Data objects.
1850 */
1851 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1852
1853 /**
1854 \brief
1855 Operator/
1856 Takes two Data objects.
1857 */
1858 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1859
1860 /**
1861 \brief
1862 Operator+
1863 Takes LHS Data object and RHS python::object.
1864 python::object must be convertible to Data type.
1865 */
1866 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1867
1868 /**
1869 \brief
1870 Operator-
1871 Takes LHS Data object and RHS python::object.
1872 python::object must be convertible to Data type.
1873 */
1874 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1875
1876 /**
1877 \brief
1878 Operator*
1879 Takes LHS Data object and RHS python::object.
1880 python::object must be convertible to Data type.
1881 */
1882 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1883
1884 /**
1885 \brief
1886 Operator/
1887 Takes LHS Data object and RHS python::object.
1888 python::object must be convertible to Data type.
1889 */
1890 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1891
1892 /**
1893 \brief
1894 Operator+
1895 Takes LHS python::object and RHS Data object.
1896 python::object must be convertible to Data type.
1897 */
1898 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1899
1900 /**
1901 \brief
1902 Operator-
1903 Takes LHS python::object and RHS Data object.
1904 python::object must be convertible to Data type.
1905 */
1906 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1907
1908 /**
1909 \brief
1910 Operator*
1911 Takes LHS python::object and RHS Data object.
1912 python::object must be convertible to Data type.
1913 */
1914 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1915
1916 /**
1917 \brief
1918 Operator/
1919 Takes LHS python::object and RHS Data object.
1920 python::object must be convertible to Data type.
1921 */
1922 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1923
1924
1925
1926 /**
1927 \brief
1928 Output operator
1929 */
1930 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1931
1932 /**
1933 \brief
1934 Compute a tensor product of two Data objects
1935 \param arg0 - Input - Data object
1936 \param arg1 - Input - Data object
1937 \param axis_offset - Input - axis offset
1938 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1939 */
1940 ESCRIPT_DLL_API
1941 Data
1942 C_GeneralTensorProduct(Data& arg0,
1943 Data& arg1,
1944 int axis_offset=0,
1945 int transpose=0);
1946
1947 /**
1948 \brief
1949 Perform the given binary operation with this and right as operands.
1950 Right is a Data object.
1951 */
template <class BinaryFunction>
inline
void
Data::binaryOp(const Data& right,
               BinaryFunction operation)
{
   //
   // A rank-zero LHS cannot absorb a higher-rank RHS; reject it.
   // (NOTE(review): the old comment spoke of "promoting" the rank, but the
   // code throws instead - promotion is not implemented here.)
   if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
     throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
   }

   // Lazy operands must be resolved by the caller before calling binaryOp.
   if (isLazy() || right.isLazy())
   {
     throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
   }
   //
   // initially make the temporary a shallow copy
   Data tempRight(right);

   // If the function spaces differ, interpolate one operand onto the
   // other's function space (whichever direction is possible).
   if (getFunctionSpace()!=right.getFunctionSpace()) {
     if (right.probeInterpolation(getFunctionSpace())) {
       //
       // an interpolation is required so create a new Data
       tempRight=Data(right,this->getFunctionSpace());
     } else if (probeInterpolation(right.getFunctionSpace())) {
       //
       // interpolate onto the RHS function space; *this is replaced by
       // the interpolated copy via set_m_data.
       Data tempLeft(*this,right.getFunctionSpace());
       //       m_data=tempLeft.m_data;
       set_m_data(tempLeft.m_data);
     }
   }
   // Throws if the operands are still incompatible after interpolation.
   operandCheck(tempRight);
   //
   // ensure this has the right type for the RHS
   typeMatchRight(tempRight);
   //
   // Need to cast to the concrete types so that the correct binaryOp
   // is called.
   if (isExpanded()) {
     //
     // Expanded data will be done in parallel, the right hand side can be
     // of any data type
     DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
     escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
   } else if (isTagged()) {
     //
     // Tagged data is operated on serially, the right hand side can be
     // either DataConstant or DataTagged
     DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
     if (right.isTagged()) {
       DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
       escript::binaryOp(*leftC,*rightC,operation);
     } else {
       // RHS is assumed DataConstant here (only remaining ready type
       // after typeMatchRight); the assert guards that assumption.
       DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
       escript::binaryOp(*leftC,*rightC,operation);
     }
   } else if (isConstant()) {
     DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
     DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
     EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
     escript::binaryOp(*leftC,*rightC,operation);
   }
}
2021
2022 /**
2023 \brief
2024 Perform the given Data object reduction algorithm on this and return the result.
2025 Given operation combines each element of each data point, thus argument
2026 object (*this) is a rank n Data object, and returned object is a scalar.
2027 Calls escript::algorithm.
2028 */
2029 template <class BinaryFunction>
2030 inline
2031 double
2032 Data::algorithm(BinaryFunction operation, double initial_value) const
2033 {
2034 if (isExpanded()) {
2035 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2036 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2037 return escript::algorithm(*leftC,operation,initial_value);
2038 } else if (isTagged()) {
2039 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2040 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2041 return escript::algorithm(*leftC,operation,initial_value);
2042 } else if (isConstant()) {
2043 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2044 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2045 return escript::algorithm(*leftC,operation,initial_value);
2046 } else if (isEmpty()) {
2047 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2048 } else if (isLazy()) {
2049 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2050 } else {
2051 throw DataException("Error - Data encapsulates an unknown type.");
2052 }
2053 }
2054
2055 /**
2056 \brief
2057 Perform the given data point reduction algorithm on data and return the result.
2058 Given operation combines each element within each data point into a scalar,
2059 thus argument object is a rank n Data object, and returned object is a
2060 rank 0 Data object.
2061 Calls escript::dp_algorithm.
2062 */
// Reduce each data point to a single scalar using 'operation'
// (seeded with initial_value), returning a rank-0 Data object with
// the same number of data points. Dispatches on the concrete type.
template <class BinaryFunction>
inline
Data
Data::dp_algorithm(BinaryFunction operation, double initial_value) const
{
  if (isEmpty()) {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  else if (isExpanded()) {
    // Scalar-shaped expanded result with the same function space.
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
    DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
    EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
    EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
    escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
    return result;
  }
  else if (isTagged()) {
    DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
    EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
    // Scalar default value (0) for the tagged result.
    DataTypes::ValueType defval(1);
    defval[0]=0;
    // NOTE(review): raw new - if dp_algorithm below throws, resultT would
    // leak; ownership only transfers on the Data(resultT) line. Confirm
    // whether dp_algorithm can throw here.
    DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
    escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
    return Data(resultT);   // note: the Data object now owns the resultT pointer
  }
  else if (isConstant()) {
    // isExpanded() is false on this branch, so 'result' is constant.
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
    DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
    EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
    EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
    escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
    return result;
  } else if (isLazy()) {
    throw DataException("Error - Operations not permitted on instances of DataLazy.");
  } else {
    throw DataException("Error - Data encapsulates an unknown type.");
  }
}
2103
2104 /**
2105 \brief
2106 Compute a tensor operation with two Data objects
2107 \param arg0 - Input - Data object
2108 \param arg1 - Input - Data object
2109 \param operation - Input - Binary op functor
2110 */
2111 template <typename BinaryFunction>
2112 inline
2113 Data
2114 C_TensorBinaryOperation(Data const &arg_0,
2115 Data const &arg_1,
2116 BinaryFunction operation)
2117 {
2118 if (arg_0.isEmpty() || arg_1.isEmpty())
2119 {
2120 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2121 }
2122 if (arg_0.isLazy() || arg_1.isLazy())
2123 {
2124 throw DataException("Error - Operations not permitted on lazy data.");
2125 }
2126 // Interpolate if necessary and find an appropriate function space
2127 Data arg_0_Z, arg_1_Z;
2128 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2129 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2130 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2131 arg_1_Z = Data(arg_1);
2132 }
2133 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2134 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2135 arg_0_Z =Data(arg_0);
2136 }
2137 else {
2138 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2139 }
2140 } else {
2141 arg_0_Z = Data(arg_0);
2142 arg_1_Z = Data(arg_1);
2143 }
2144 // Get rank and shape of inputs
2145 int rank0 = arg_0_Z.getDataPointRank();
2146 int rank1 = arg_1_Z.getDataPointRank();
2147 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2148 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2149 int size0 = arg_0_Z.getDataPointSize();
2150 int size1 = arg_1_Z.getDataPointSize();
2151 // Declare output Data object
2152 Data res;
2153
2154 if (shape0 == shape1) {
2155 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2156 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2157 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2158 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2159 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2160
2161 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2162 }
2163 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2164
2165 // Prepare the DataConstant input
2166 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2167
2168 // Borrow DataTagged input from Data object
2169 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2170
2171 // Prepare a DataTagged output 2
2172 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2173 res.tag();
2174 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2175
2176 // Prepare offset into DataConstant
2177 int offset_0 = tmp_0->getPointOffset(0,0);
2178 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2179
2180 // Get the pointers to the actual data
2181 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2182 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2183
2184 // Compute a result for the default
2185 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2186 // Compute a result for each tag
2187 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2188 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2189 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2190 tmp_2->addTag(i->first);
2191 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2192 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2193
2194 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2195 }
2196
2197 }
2198 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2199 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2200 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2201 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2202 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2203
2204 int sampleNo_1,dataPointNo_1;
2205 int numSamples_1 = arg_1_Z.getNumSamples();
2206 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2207 int offset_0 = tmp_0->getPointOffset(0,0);
2208 res.requireWrite();
2209 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2210 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2211 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2212 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2213 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2214 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2215 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2216 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2217 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2218 }
2219 }
2220
2221 }
2222 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2223 // Borrow DataTagged input from Data object
2224 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2225
2226 // Prepare the DataConstant input
2227 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2228
2229 // Prepare a DataTagged output 2
2230 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2231 res.tag();
2232 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2233
2234 // Prepare offset into DataConstant
2235 int offset_1 = tmp_1->getPointOffset(0,0);
2236
2237 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2238 // Get the pointers to the actual data
2239 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2240 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2241 // Compute a result for the default
2242 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2243 // Compute a result for each tag
2244 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2245 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2246 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2247 tmp_2->addTag(i->first);
2248 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2249 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2250 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2251 }
2252
2253 }
2254 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2255 // Borrow DataTagged input from Data object
2256 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2257
2258 // Borrow DataTagged input from Data object
2259 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2260
2261 // Prepare a DataTagged output 2
2262 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2263 res.tag(); // DataTagged output
2264 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2265
2266 // Get the pointers to the actual data
2267 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2268 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2269 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2270
2271 // Compute a result for the default
2272 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2273 // Merge the tags
2274 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2275 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2276 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2277 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2278 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2279 }
2280 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2281 tmp_2->addTag(i->first);
2282 }
2283 // Compute a result for each tag
2284 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2285 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2286
2287 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2288 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2289 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2290
2291 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2292 }
2293
2294 }
2295 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2296 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2297 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2298 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2299 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2300 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2301
2302 int sampleNo_0,dataPointNo_0;
2303 int numSamples_0 = arg_0_Z.getNumSamples();
2304 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2305 res.requireWrite();
2306 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2307 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2308 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2309 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2310 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2311 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2312 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2313 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2314 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2315 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2316 }
2317 }
2318
2319 }
2320 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2321 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2322 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2323 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2324 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2325
2326 int sampleNo_0,dataPointNo_0;
2327 int numSamples_0 = arg_0_Z.getNumSamples();
2328 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2329 int offset_1 = tmp_1->getPointOffset(0,0);
2330 res.requireWrite();
2331 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2332 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2333 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2334 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2335 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2336
2337 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2338 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2339 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2340
2341
2342 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2343 }
2344 }
2345
2346 }
2347 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2348 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2349 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2350 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2351 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2352 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2353
2354 int sampleNo_0,dataPointNo_0;
2355 int numSamples_0 = arg_0_Z.getNumSamples();
2356 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2357 res.requireWrite();
2358 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2359 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2360 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2361 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2362 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2363 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2364 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2365 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2366 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2367 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2368 }
2369 }
2370
2371 }
2372 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2373 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2374 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2375 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2376 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2377 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2378
2379 int sampleNo_0,dataPointNo_0;
2380 int numSamples_0 = arg_0_Z.getNumSamples();
2381 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2382 res.requireWrite();
2383 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2384 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2385 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2386 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2387 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2388 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2389 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2390 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2391 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2392 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2393 }
2394 }
2395
2396 }
2397 else {
2398 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2399 }
2400
2401 } else if (0 == rank0) {
2402 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2403 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2404 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2405 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2406 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2407 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2408 }
2409 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2410
2411 // Prepare the DataConstant input
2412 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2413
2414 // Borrow DataTagged input from Data object
2415 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2416
2417 // Prepare a DataTagged output 2
2418 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2419 res.tag();
2420 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2421
2422 // Prepare offset into DataConstant
2423 int offset_0 = tmp_0->getPointOffset(0,0);
2424 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2425
2426 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2427 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2428
2429 // Compute a result for the default
2430 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2431 // Compute a result for each tag
2432 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2433 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2434 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2435 tmp_2->addTag(i->first);
2436 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2437 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2438 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2439 }
2440
2441 }
2442 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2443
2444 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2445 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2446 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2447 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2448
2449 int sampleNo_1,dataPointNo_1;
2450 int numSamples_1 = arg_1_Z.getNumSamples();
2451 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2452 int offset_0 = tmp_0->getPointOffset(0,0);
2453 res.requireWrite();
2454 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2455 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2456 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2457 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2458 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2459 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2460 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2461 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2462 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2463
2464 }
2465 }
2466
2467 }
2468 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2469
2470 // Borrow DataTagged input from Data object
2471 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2472
2473 // Prepare the DataConstant input
2474 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2475
2476 // Prepare a DataTagged output 2
2477 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2478 res.tag();
2479 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2480
2481 // Prepare offset into DataConstant
2482 int offset_1 = tmp_1->getPointOffset(0,0);
2483 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2484
2485 // Get the pointers to the actual data
2486 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2487 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2488
2489
2490 // Compute a result for the default
2491 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2492 // Compute a result for each tag
2493 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2494 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2495 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2496 tmp_2->addTag(i->first);
2497 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2498 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2499
2500 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2501 }
2502
2503 }
2504 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2505
2506 // Borrow DataTagged input from Data object
2507 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2508
2509 // Borrow DataTagged input from Data object
2510 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2511
2512 // Prepare a DataTagged output 2
2513 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2514 res.tag(); // DataTagged output
2515 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2516
2517 // Get the pointers to the actual data
2518 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2519 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2520 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2521
2522 // Compute a result for the default
2523 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2524 // Merge the tags
2525 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2526 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2527 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2528 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2529 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2530 }
2531 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2532 tmp_2->addTag(i->first);
2533 }
2534 // Compute a result for each tag
2535 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2536 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2537
2538 /* DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2539 DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2540 DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2541 double *ptr_0 = &view_0.getData(0);
2542 double *ptr_1 = &view_1.getData(0);
2543 double *ptr_2 = &view_2.getData(0);*/
2544
2545 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2546 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2547 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2548
2549 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2550 }
2551
2552 }
2553 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2554
2555 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2556 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2557 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2558 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2559 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2560
2561 int sampleNo_0,dataPointNo_0;
2562 int numSamples_0 = arg_0_Z.getNumSamples();
2563 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2564 res.requireWrite();
2565 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2566 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2567 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2568 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2569 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2570 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2571 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2572 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2573 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2574 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2575 }
2576 }
2577
2578 }
2579 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2580 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2581 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2582 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2583 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2584
2585 int sampleNo_0,dataPointNo_0;
2586 int numSamples_0 = arg_0_Z.getNumSamples();
2587 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2588 int offset_1 = tmp_1->getPointOffset(0,0);
2589 res.requireWrite();
2590 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2591 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2592 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2593 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2594 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2595 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2596 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2597 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2598 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2599 }
2600 }
2601
2602
2603 }
2604 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2605
2606 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2607 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2608 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2609 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2610 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2611
2612 int sampleNo_0,dataPointNo_0;
2613 int numSamples_0 = arg_0_Z.getNumSamples();
2614 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2615 res.requireWrite();
2616 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2617 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2618 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2619 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2620 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2621 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2622 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2623 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2624 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2625 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2626 }
2627 }
2628
2629 }
2630 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2631
2632 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2633 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2634 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2635 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2636 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2637
2638 int sampleNo_0,dataPointNo_0;
2639 int numSamples_0 = arg_0_Z.getNumSamples();
2640 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2641 res.requireWrite();
2642 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2643 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2644 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2645 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2646 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2647 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2648 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2649 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2650 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2651 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2652 }
2653 }
2654
2655 }
2656 else {
2657 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2658 }
2659
2660 } else if (0 == rank1) {
2661 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2662 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2663 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2664 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2665 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2666 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2667 }
2668 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2669
2670 // Prepare the DataConstant input
2671 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2672
2673 // Borrow DataTagged input from Data object
2674 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2675
2676 // Prepare a DataTagged output 2
2677 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2678 res.tag();
2679 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2680
2681 // Prepare offset into DataConstant
2682 int offset_0 = tmp_0->getPointOffset(0,0);
2683 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2684
2685 //Get the pointers to the actual data
2686 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2687 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2688
2689 // Compute a result for the default
2690 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2691 // Compute a result for each tag
2692 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2693 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2694 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2695 tmp_2->addTag(i->first);
2696 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2697 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2698 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2699 }
2700 }
2701 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2702
2703 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2704 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2705 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2706 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2707
2708 int sampleNo_1,dataPointNo_1;
2709 int numSamples_1 = arg_1_Z.getNumSamples();
2710 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2711 int offset_0 = tmp_0->getPointOffset(0,0);
2712 res.requireWrite();
2713 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2714 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2715 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2716 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2717 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2718 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2719 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2720 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2721 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2722 }
2723 }
2724
2725 }
2726 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2727
2728 // Borrow DataTagged input from Data object
2729 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2730
2731 // Prepare the DataConstant input
2732 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2733
2734 // Prepare a DataTagged output 2
2735 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2736 res.tag();
2737 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2738
2739 // Prepare offset into DataConstant
2740 int offset_1 = tmp_1->getPointOffset(0,0);
2741 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2742 // Get the pointers to the actual data
2743 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2744 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2745 // Compute a result for the default
2746 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2747 // Compute a result for each tag
2748 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2749 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2750 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2751 tmp_2->addTag(i->first);
2752 /* DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2753 DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2754 double *ptr_0 = &view_0.getData(0);
2755 double *ptr_2 = &view_2.getData(0);*/
2756 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2757 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2758 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2759 }
2760
2761 }
2762 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2763
2764 // Borrow DataTagged input from Data object
2765 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2766
2767 // Borrow DataTagged input from Data object
2768 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2769
2770 // Prepare a DataTagged output 2
2771 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2772 res.tag(); // DataTagged output
2773 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2774
2775 // Get the pointers to the actual data
2776 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2777 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2778 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2779
2780 // Compute a result for the default
2781 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2782 // Merge the tags
2783 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2784 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2785 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2786 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2787 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2788 }
2789 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2790 tmp_2->addTag(i->first);
2791 }
2792 // Compute a result for each tag
2793 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2794 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2795 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2796 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2797 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2798 // double *ptr_0 = &view_0.getData(0);
2799 // double *ptr_1 = &view_1.getData(0);
2800 // double *ptr_2 = &view_2.getData(0);
2801
2802 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2803 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2804 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2805 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2806 }
2807
2808 }
2809 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2810
2811 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2812 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2813 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2814 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2815 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2816
2817 int sampleNo_0,dataPointNo_0;
2818 int numSamples_0 = arg_0_Z.getNumSamples();
2819 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2820 res.requireWrite();
2821 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2822 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2823 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2824 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2825 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2826 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2827 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2828 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2829 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2830 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2831 }
2832 }
2833
2834 }
2835 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2836 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2837 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2838 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2839 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2840
2841 int sampleNo_0,dataPointNo_0;
2842 int numSamples_0 = arg_0_Z.getNumSamples();
2843 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2844 int offset_1 = tmp_1->getPointOffset(0,0);
2845 res.requireWrite();
2846 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2847 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2848 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2849 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2850 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2851 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2852 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2853 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2854 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2855 }
2856 }
2857
2858
2859 }
2860 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2861
2862 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2863 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2864 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2865 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2866 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2867
2868 int sampleNo_0,dataPointNo_0;
2869 int numSamples_0 = arg_0_Z.getNumSamples();
2870 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2871 res.requireWrite();
2872 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2873 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2874 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2875 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2876 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2877 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2878 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2879 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2880 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2881 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2882 }
2883 }
2884
2885 }
2886 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2887
2888 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2889 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2890 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2891 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2892 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2893
2894 int sampleNo_0,dataPointNo_0;
2895 int numSamples_0 = arg_0_Z.getNumSamples();
2896 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2897 res.requireWrite();
2898 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2899 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2900 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2901 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2902 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2903 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2904 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2905 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2906 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2907 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2908 }
2909 }
2910
2911 }
2912 else {
2913 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2914 }
2915
2916 } else {
2917 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2918 }
2919
2920 return res;
2921 }
2922
2923 template <typename UnaryFunction>
2924 Data
2925 C_TensorUnaryOperation(Data const &arg_0,
2926 UnaryFunction operation)
2927 {
2928 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2929 {
2930 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2931 }
2932 if (arg_0.isLazy())
2933 {
2934 throw DataException("Error - Operations not permitted on lazy data.");
2935 }
2936 // Interpolate if necessary and find an appropriate function space
2937 Data arg_0_Z = Data(arg_0);
2938
2939 // Get rank and shape of inputs
2940 // int rank0 = arg_0_Z.getDataPointRank();
2941 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2942 int size0 = arg_0_Z.getDataPointSize();
2943
2944 // Declare output Data object
2945 Data res;
2946
2947 if (arg_0_Z.isConstant()) {
2948 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2949 // double *ptr_0 = &((arg_0_Z.getPointDataView().getData())[0]);
2950 // double *ptr_2 = &((res.getPointDataView().getData())[0]);
2951 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2952 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2953 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2954 }
2955 else if (arg_0_Z.isTagged()) {
2956
2957 // Borrow DataTagged input from Data object
2958 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2959
2960 // Prepare a DataTagged output 2
2961 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2962 res.tag();
2963 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2964
2965 // Get the pointers to the actual data
2966 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2967 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2968 // Compute a result for the default
2969 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2970 // Compute a result for each tag
2971 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2972 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2973 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2974 tmp_2->addTag(i->first);
2975 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2976 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2977 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2978 }
2979
2980 }
2981 else if (arg_0_Z.isExpanded()) {
2982
2983 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2984 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2985 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2986
2987 int sampleNo_0,dataPointNo_0;
2988 int numSamples_0 = arg_0_Z.getNumSamples();
2989 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2990 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2991 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2992 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2993 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2994 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2995 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2996 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2997 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2998 }
2999 }
3000 }
3001 else {
3002 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3003 }
3004
3005 return res;
3006 }
3007
3008 }
3009 #endif

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26