/[escript]/trunk/escript/src/Data.h
Revision 4114
Fri Dec 14 04:24:46 2012 UTC by caltinay
File MIME type: text/plain
File size: 96176 byte(s)
Time to remove deprecated saveVTK/DX methods from Data and Domain.

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2012 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15
16
17 /** \file Data.h */
18
19 #ifndef DATA_H
20 #define DATA_H
21 #include "system_dep.h"
22
23 #include "DataTypes.h"
24 #include "DataAbstract.h"
25 #include "DataAlgorithm.h"
26 #include "FunctionSpace.h"
27 #include "BinaryOp.h"
28 #include "UnaryOp.h"
29 #include "DataException.h"
30
31
32
33 extern "C" {
34 #include "DataC.h"
35 //#include <omp.h>
36 }
37
38 #ifdef _OPENMP
39 #include <omp.h>
40 #endif
41
42 #include "esysUtils/Esys_MPI.h"
43 #include <string>
44 #include <algorithm>
45 #include <sstream>
46
47 #include <boost/shared_ptr.hpp>
48 #include <boost/python/object.hpp>
49 #include <boost/python/tuple.hpp>
50
51 namespace escript {
52
53 //
54 // Forward declaration for various implementations of Data.
55 class DataConstant;
56 class DataTagged;
57 class DataExpanded;
58 class DataLazy;
59
60 /**
61 \brief
62 Data represents a collection of datapoints.
63
64 Description:
65 Internally, the datapoints are actually stored by a DataAbstract object.
66 The specific instance of DataAbstract used may vary over the lifetime
67 of the Data object.
68 Some methods on this class return references (eg getShape()).
69 These references should not be used after an operation which changes the underlying DataAbstract object.
70 Doing so will lead to invalid memory access.
71 This should not affect any methods exposed via boost::python.
72 */
73 class Data {
74
75 public:
76
77 // These typedefs allow function names to be cast to pointers
78 // to functions of the appropriate type when calling unaryOp etc.
79 typedef double (*UnaryDFunPtr)(double);
80 typedef double (*BinaryDFunPtr)(double,double);
81
82
83 /**
84 Constructors.
85 */
86
87 /**
88 \brief
89 Default constructor.
90 Creates a DataEmpty object.
91 */
92 ESCRIPT_DLL_API
93 Data();
94
95 /**
96 \brief
97 Copy constructor.
98 WARNING: Only performs a shallow copy.
99 */
100 ESCRIPT_DLL_API
101 Data(const Data& inData);
102
103 /**
104 \brief
105 Constructor from another Data object. If "what" differs from the
106 function space of inData, an attempt is made to interpolate inData onto what;
107 otherwise a shallow copy of inData is returned.
108 */
109 ESCRIPT_DLL_API
110 Data(const Data& inData,
111 const FunctionSpace& what);
112
113 /**
114 \brief Copy Data from an existing vector
115 */
116
117 ESCRIPT_DLL_API
118 Data(const DataTypes::ValueType& value,
119 const DataTypes::ShapeType& shape,
120 const FunctionSpace& what=FunctionSpace(),
121 bool expanded=false);
122
123 /**
124 \brief
125 Constructor which creates a Data with points having the specified shape.
126
127 \param value - Input - Single value applied to all Data.
128 \param dataPointShape - Input - The shape of each data point.
129 \param what - Input - A description of what this data represents.
130 \param expanded - Input - Flag, if true fill the entire container with
131 the given value. Otherwise a more efficient storage
132 mechanism will be used.
133 */
134 ESCRIPT_DLL_API
135 Data(double value,
136 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
137 const FunctionSpace& what=FunctionSpace(),
138 bool expanded=false);
139
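// Illustrative usage sketch (not part of the original header): constructing Data objects
// with this constructor. Assumes a valid FunctionSpace 'fs' has been obtained from a
// Domain elsewhere; the function name is a placeholder.
//
//     void makeScalars(const escript::FunctionSpace& fs)
//     {
//         escript::Data c(2.5, escript::DataTypes::scalarShape, fs, false); // constant storage
//         escript::Data e(0.0, escript::DataTypes::scalarShape, fs, true);  // expanded storage
//     }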
140 /**
141 \brief
142 Constructor which performs a deep copy of a region from another Data object.
143
144 \param inData - Input - Input Data object.
145 \param region - Input - Region to copy.
146 */
147 ESCRIPT_DLL_API
148 Data(const Data& inData,
149 const DataTypes::RegionType& region);
150
151 /**
152 \brief
153 Constructor which copies data from any object that can be treated like a python array/sequence.
154
155 \param value - Input - Input data.
156 \param what - Input - A description of what this data represents.
157 \param expanded - Input - Flag, if true fill the entire container with
158 the value. Otherwise a more efficient storage
159 mechanism will be used.
160 */
161 ESCRIPT_DLL_API
162 Data(const boost::python::object& value,
163 const FunctionSpace& what=FunctionSpace(),
164 bool expanded=false);
165
166 /**
167 \brief
168 Constructor which copies data from a wrapped array.
169
170 \param w - Input - Input data.
171 \param what - Input - A description of what this data represents.
172 \param expanded - Input - Flag, if true fill the entire container with
173 the value. Otherwise a more efficient storage
174 mechanism will be used.
175 */
176 ESCRIPT_DLL_API
177 Data(const WrappedArray& w, const FunctionSpace& what,
178 bool expanded=false);
179
180
181 /**
182 \brief
183 Constructor which creates a DataConstant.
184 Copies data from any object that can be treated like a python array/sequence.
185 All other parameters are copied from other.
186
187 \param value - Input - Input data.
188 \param other - Input - contains all other parameters.
189 */
190 ESCRIPT_DLL_API
191 Data(const boost::python::object& value,
192 const Data& other);
193
194 /**
195 \brief
196 Constructor which creates a DataConstant of "shape" with constant value.
197 */
198 ESCRIPT_DLL_API
199 Data(double value,
200 const boost::python::tuple& shape=boost::python::make_tuple(),
201 const FunctionSpace& what=FunctionSpace(),
202 bool expanded=false);
203
204 /**
205 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
206 Once you have passed the pointer, do not delete it.
207 */
208 ESCRIPT_DLL_API
209 explicit Data(DataAbstract* underlyingdata);
210
211 /**
212 \brief Create a Data based on the supplied DataAbstract
213 */
214 ESCRIPT_DLL_API
215 explicit Data(DataAbstract_ptr underlyingdata);
216
217 /**
218 \brief
219 Destructor
220 */
221 ESCRIPT_DLL_API
222 ~Data();
223
224 /**
225 \brief Make this object a deep copy of "other".
226 */
227 ESCRIPT_DLL_API
228 void
229 copy(const Data& other);
230
231 /**
232 \brief Return a pointer to a deep copy of this object.
233 */
234 ESCRIPT_DLL_API
235 Data
236 copySelf();
237
238
239 /**
240 \brief produce a delayed evaluation version of this Data.
241 */
242 ESCRIPT_DLL_API
243 Data
244 delay();
245
246 /**
247 \brief convert the current data into lazy data.
248 */
249 ESCRIPT_DLL_API
250 void
251 delaySelf();
252
253
254 /**
255 Member access methods.
256 */
257
258 /**
259 \brief
260 switches on update protection
261
262 */
263 ESCRIPT_DLL_API
264 void
265 setProtection();
266
267 /**
268 \brief
269 Returns true if the data object is protected against updates
270
271 */
272 ESCRIPT_DLL_API
273 bool
274 isProtected() const;
275
276
277 /**
278 \brief
279 Return the value of a data point as a python tuple.
280 */
281 ESCRIPT_DLL_API
282 const boost::python::object
283 getValueOfDataPointAsTuple(int dataPointNo);
284
285 /**
286 \brief
287 sets the values of a data-point from a python object on this process
288 */
289 ESCRIPT_DLL_API
290 void
291 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
292
293 /**
294 \brief
295 sets the values of a data-point from an array-like object on this process
296 */
297 ESCRIPT_DLL_API
298 void
299 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
300
301 /**
302 \brief
303 sets the values of a data-point on this process
304 */
305 ESCRIPT_DLL_API
306 void
307 setValueOfDataPoint(int dataPointNo, const double);
308
309 /**
310 \brief Return a data point across all processors as a python tuple.
311 */
312 ESCRIPT_DLL_API
313 const boost::python::object
314 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
315
316
317 /**
318 \brief Set the value of a global data point
319 */
320 ESCRIPT_DLL_API
321 void
322 setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
323
324 /**
325 \brief
326 Return the tag number associated with the given data-point.
327
328 */
329 ESCRIPT_DLL_API
330 int
331 getTagNumber(int dpno);
332
333 /**
334 \brief
335 Return the C wrapper for the Data object.
336 */
337 ESCRIPT_DLL_API
338 escriptDataC
339 getDataC();
340
341
342
343 /**
344 \brief
345 Return the C wrapper for the Data object - const version.
346 */
347 ESCRIPT_DLL_API
348 escriptDataC
349 getDataC() const;
350
351
352 /**
353 \brief
354 Write the data as a string. For large amounts of data, a summary is printed.
355 */
356 ESCRIPT_DLL_API
357 std::string
358 toString() const;
359
360 /**
361 \brief
362 Whatever the current Data type make this into a DataExpanded.
363 */
364 ESCRIPT_DLL_API
365 void
366 expand();
367
368 /**
369 \brief
370 If possible convert this Data to DataTagged. This will only allow
371 Constant data to be converted to tagged. An attempt to convert
372 Expanded data to tagged will throw an exception.
373 */
374 ESCRIPT_DLL_API
375 void
376 tag();
377
378 /**
379 \brief If this data is lazy, then convert it to ready data.
380 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
381 */
382 ESCRIPT_DLL_API
383 void
384 resolve();
385
386
387 /**
388 \brief Ensures data is ready for write access.
389 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
390 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
391 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
392 Doing so might introduce additional sharing.
393 */
394 ESCRIPT_DLL_API
395 void
396 requireWrite();
397
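// Illustrative usage sketch (not part of the original header): the usual pattern for
// read-write access to sample data. The function name and the zeroing operation are
// placeholders only.
//
//     void zeroFirstSample(escript::Data& d)
//     {
//         d.requireWrite();                          // resolve lazy data / break sharing first
//         double* p = d.getSampleDataRW(0);          // pointer to the first sample
//         int n = d.getNumDataPointsPerSample() * d.getDataPointSize();
//         for (int i = 0; i < n; ++i)
//             p[i] = 0.0;
//     }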
398 /**
399 \brief
400 Return true if this Data is expanded.
401 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
402 */
403 ESCRIPT_DLL_API
404 bool
405 isExpanded() const;
406
407 /**
408 \brief
409 Return true if this Data is expanded or resolves to expanded.
410 That is, if it has a separate value for each datapoint in the sample.
411 */
412 ESCRIPT_DLL_API
413 bool
414 actsExpanded() const;
415
416
417 /**
418 \brief
419 Return true if this Data is tagged.
420 */
421 ESCRIPT_DLL_API
422 bool
423 isTagged() const;
424
425 /**
426 \brief
427 Return true if this Data is constant.
428 */
429 ESCRIPT_DLL_API
430 bool
431 isConstant() const;
432
433 /**
434 \brief Return true if this Data is lazy.
435 */
436 ESCRIPT_DLL_API
437 bool
438 isLazy() const;
439
440 /**
441 \brief Return true if this data is ready.
442 */
443 ESCRIPT_DLL_API
444 bool
445 isReady() const;
446
447 /**
448 \brief
449 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
450 contains datapoints.
451 */
452 ESCRIPT_DLL_API
453 bool
454 isEmpty() const;
455
456 /**
457 \brief
458 Return the function space.
459 */
460 ESCRIPT_DLL_API
461 inline
462 const FunctionSpace&
463 getFunctionSpace() const
464 {
465 return m_data->getFunctionSpace();
466 }
467
468 /**
469 \brief
470 Return a copy of the function space.
471 */
472 ESCRIPT_DLL_API
473 const FunctionSpace
474 getCopyOfFunctionSpace() const;
475
476 /**
477 \brief
478 Return the domain.
479 */
480 ESCRIPT_DLL_API
481 inline
482 // const AbstractDomain&
483 const_Domain_ptr
484 getDomain() const
485 {
486 return getFunctionSpace().getDomain();
487 }
488
489
490 /**
491 \brief
492 Return the domain.
493 TODO: For internal use only. This should be removed.
494 */
495 ESCRIPT_DLL_API
496 inline
497 // const AbstractDomain&
498 Domain_ptr
499 getDomainPython() const
500 {
501 return getFunctionSpace().getDomainPython();
502 }
503
504 /**
505 \brief
506 Return a copy of the domain.
507 */
508 ESCRIPT_DLL_API
509 const AbstractDomain
510 getCopyOfDomain() const;
511
512 /**
513 \brief
514 Return the rank of the point data.
515 */
516 ESCRIPT_DLL_API
517 inline
518 unsigned int
519 getDataPointRank() const
520 {
521 return m_data->getRank();
522 }
523
524 /**
525 \brief
526 Return the number of data points
527 */
528 ESCRIPT_DLL_API
529 inline
530 int
531 getNumDataPoints() const
532 {
533 return getNumSamples() * getNumDataPointsPerSample();
534 }
535 /**
536 \brief
537 Return the number of samples.
538 */
539 ESCRIPT_DLL_API
540 inline
541 int
542 getNumSamples() const
543 {
544 return m_data->getNumSamples();
545 }
546
547 /**
548 \brief
549 Return the number of data points per sample.
550 */
551 ESCRIPT_DLL_API
552 inline
553 int
554 getNumDataPointsPerSample() const
555 {
556 return m_data->getNumDPPSample();
557 }
558
559
560 /**
561 \brief
562 Return the number of values in the shape for this object.
563 */
564 ESCRIPT_DLL_API
565 int
566 getNoValues() const
567 {
568 return m_data->getNoValues();
569 }
570
571
572 /**
573 \brief
574 dumps the object into a netCDF file
575 */
576 ESCRIPT_DLL_API
577 void
578 dump(const std::string fileName) const;
579
580 /**
581 \brief returns the values of the object as a list of tuples (one for each datapoint).
582
583 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
584 If false, the result is a list of scalars [1, 2, ...]
585 */
586 ESCRIPT_DLL_API
587 const boost::python::object
588 toListOfTuples(bool scalarastuple=true);
589
590
591 /**
592 \brief
593 Return the sample data for the given sample no.
594 Please do not use this unless you NEED to access samples individually
595 \param sampleNo - Input - the given sample no.
596 \return pointer to the sample data.
597 */
598 ESCRIPT_DLL_API
599 inline
600 const DataAbstract::ValueType::value_type*
601 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo);
602
603
604 /**
605 \brief
606 Return the sample data for the given sample no.
607 Please do not use this unless you NEED to access samples individually
608 \param sampleNo - Input - the given sample no.
609 \return pointer to the sample data.
610 */
611 ESCRIPT_DLL_API
612 inline
613 DataAbstract::ValueType::value_type*
614 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
615
616
617 /**
618 \brief
619 Return the sample data for the given tag. If an attempt is made to
620 access data that isn't tagged an exception will be thrown.
621 \param tag - Input - the tag key.
622 */
623 ESCRIPT_DLL_API
624 inline
625 DataAbstract::ValueType::value_type*
626 getSampleDataByTag(int tag)
627 {
628 return m_data->getSampleDataByTag(tag);
629 }
630
631 /**
632 \brief
633 Return a reference into the DataVector which points to the specified data point.
634 \param sampleNo - Input -
635 \param dataPointNo - Input -
636 */
637 ESCRIPT_DLL_API
638 DataTypes::ValueType::const_reference
639 getDataPointRO(int sampleNo, int dataPointNo);
640
641 /**
642 \brief
643 Return a reference into the DataVector which points to the specified data point.
644 \param sampleNo - Input -
645 \param dataPointNo - Input -
646 */
647 ESCRIPT_DLL_API
648 DataTypes::ValueType::reference
649 getDataPointRW(int sampleNo, int dataPointNo);
650
651
652
653 /**
654 \brief
655 Return the offset for the given sample and point within the sample
656 */
657 ESCRIPT_DLL_API
658 inline
659 DataTypes::ValueType::size_type
660 getDataOffset(int sampleNo,
661 int dataPointNo)
662 {
663 return m_data->getPointOffset(sampleNo,dataPointNo);
664 }
665
666 /**
667 \brief
668 Return a reference to the data point shape.
669 */
670 ESCRIPT_DLL_API
671 inline
672 const DataTypes::ShapeType&
673 getDataPointShape() const
674 {
675 return m_data->getShape();
676 }
677
678 /**
679 \brief
680 Return the data point shape as a tuple of integers.
681 */
682 ESCRIPT_DLL_API
683 const boost::python::tuple
684 getShapeTuple() const;
685
686 /**
687 \brief
688 Return the size of the data point. It is the product of the
689 data point shape dimensions.
690 */
691 ESCRIPT_DLL_API
692 int
693 getDataPointSize() const;
694
695 /**
696 \brief
697 Return the number of doubles stored for this Data.
698 */
699 ESCRIPT_DLL_API
700 DataTypes::ValueType::size_type
701 getLength() const;
702
703 /**
704 \brief Return true if this object contains no samples.
705 This is not the same as isEmpty()
706 */
707 ESCRIPT_DLL_API
708 bool
709 hasNoSamples() const
710 {
711 return getLength()==0;
712 }
713
714 /**
715 \brief
716 Assign the given value to the tag associated with name. Implicitly converts this
717 object to type DataTagged. Throws an exception if this object
718 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
719 \param name - Input - name of tag.
720 \param value - Input - Value to associate with given key.
721 */
722 ESCRIPT_DLL_API
723 void
724 setTaggedValueByName(std::string name,
725 const boost::python::object& value);
726
727 /**
728 \brief
729 Assign the given value to the tag. Implicitly converts this
730 object to type DataTagged if it is constant.
731
732 \param tagKey - Input - Integer key.
733 \param value - Input - Value to associate with given key.
734 *
735 */
736 ESCRIPT_DLL_API
737 void
738 setTaggedValue(int tagKey,
739 const boost::python::object& value);
740
741 /**
742 \brief
743 Assign the given value to the tag. Implicitly converts this
744 object to type DataTagged if it is constant.
745
746 \param tagKey - Input - Integer key.
747 \param pointshape - Input - The shape of the value parameter
748 \param value - Input - Value to associate with given key.
749 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
750 */
751 ESCRIPT_DLL_API
752 void
753 setTaggedValueFromCPP(int tagKey,
754 const DataTypes::ShapeType& pointshape,
755 const DataTypes::ValueType& value,
756 int dataOffset=0);
757
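// Illustrative sketch (not part of the original header): assigning a scalar value to a
// tag from C++. The tag key 1 and the value 5.0 are placeholders.
//
//     void setTagOne(escript::Data& d)
//     {
//         d.tag();                                   // convert constant data to tagged if needed
//         escript::DataTypes::ValueType v(1);        // storage for one scalar value
//         v[0] = 5.0;
//         d.setTaggedValueFromCPP(1, escript::DataTypes::scalarShape, v);
//     }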
758
759
760 /**
761 \brief
762 Copy other Data object into this Data object where mask is positive.
763 */
764 ESCRIPT_DLL_API
765 void
766 copyWithMask(const Data& other,
767 const Data& mask);
768
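// Illustrative sketch (not part of the original header): copy the positive part of
// 'other' into 'd' using one of the where* methods to build the mask.
//
//     void copyPositivePart(escript::Data& d, const escript::Data& other)
//     {
//         escript::Data mask = other.wherePositive(); // 1 where other > 0, 0 elsewhere
//         d.copyWithMask(other, mask);                // overwrite d where mask is positive
//     }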
769 /**
770 Data object operation methods and operators.
771 */
772
773 /**
774 \brief
775 set all values to zero
776 *
777 */
778 ESCRIPT_DLL_API
779 void
780 setToZero();
781
782 /**
783 \brief
784 Interpolates this onto the given functionspace and returns
785 the result as a Data object.
786 *
787 */
788 ESCRIPT_DLL_API
789 Data
790 interpolate(const FunctionSpace& functionspace) const;
791
792 ESCRIPT_DLL_API
793 Data
794 interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
795 double undef, Data& B, double Bmin, double Bstep, Data& C,
796 double Cmin, double Cstep, bool check_boundaries);
797
798 ESCRIPT_DLL_API
799 Data
800 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
801 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
802
803 ESCRIPT_DLL_API
804 Data
805 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
806 double undef,bool check_boundaries);
807
808
809 ESCRIPT_DLL_API
810 Data
811 interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
812 Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
813
814
815 ESCRIPT_DLL_API
816 Data
817 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
818 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
819
820 ESCRIPT_DLL_API
821 Data
822 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
823 double undef,bool check_boundaries);
824
825 ESCRIPT_DLL_API
826 Data
827 nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
828
829 ESCRIPT_DLL_API
830 Data
831 nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
832
833 /**
834 \brief
835 Calculates the gradient of the data at the data points of functionspace.
836 If functionspace is not present the function space of Function(getDomain()) is used.
837 *
838 */
839 ESCRIPT_DLL_API
840 Data
841 gradOn(const FunctionSpace& functionspace) const;
842
843 ESCRIPT_DLL_API
844 Data
845 grad() const;
846
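// Illustrative sketch (not part of the original header): interpolation and gradient
// evaluation. Assumes 'fs' is a FunctionSpace onto which 'u' can be interpolated.
//
//     void derivedQuantities(const escript::Data& u, const escript::FunctionSpace& fs)
//     {
//         escript::Data ui = u.interpolate(fs);   // u represented on fs
//         escript::Data g  = u.grad();            // gradient on Function(getDomain())
//         escript::Data g2 = u.gradOn(fs);        // gradient evaluated on fs
//     }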
847 /**
848 \brief
849 Calculate the integral over the function space domain as a python tuple.
850 */
851 ESCRIPT_DLL_API
852 boost::python::object
853 integrateToTuple_const() const;
854
855
856 /**
857 \brief
858 Calculate the integral over the function space domain as a python tuple.
859 */
860 ESCRIPT_DLL_API
861 boost::python::object
862 integrateToTuple();
863
864
865
866 /**
867 \brief
868 Return 1./(each data point of this Data object), i.e. the elementwise reciprocal.
869 *
870 */
871 ESCRIPT_DLL_API
872 Data
873 oneOver() const;
874 /**
875 \brief
876 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
877 *
878 */
879 ESCRIPT_DLL_API
880 Data
881 wherePositive() const;
882
883 /**
884 \brief
885 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
886 *
887 */
888 ESCRIPT_DLL_API
889 Data
890 whereNegative() const;
891
892 /**
893 \brief
894 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
895 *
896 */
897 ESCRIPT_DLL_API
898 Data
899 whereNonNegative() const;
900
901 /**
902 \brief
903 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
904 *
905 */
906 ESCRIPT_DLL_API
907 Data
908 whereNonPositive() const;
909
910 /**
911 \brief
912 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
913 *
914 */
915 ESCRIPT_DLL_API
916 Data
917 whereZero(double tol=0.0) const;
918
919 /**
920 \brief
921 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
922 *
923 */
924 ESCRIPT_DLL_API
925 Data
926 whereNonZero(double tol=0.0) const;
927
928 /**
929 \brief
930 Return the maximum absolute value of this Data object.
931
932 The method is not const because lazy data needs to be expanded before Lsup can be computed.
933 The _const form can be used when the Data object is const, however this will only work for
934 Data which is not Lazy.
935
936 For Data which contain no samples (or tagged Data for which no tags in use have a value)
937 zero is returned.
938 */
939 ESCRIPT_DLL_API
940 double
941 Lsup();
942
943 ESCRIPT_DLL_API
944 double
945 Lsup_const() const;
946
947
948 /**
949 \brief
950 Return the maximum value of this Data object.
951
952 The method is not const because lazy data needs to be expanded before sup can be computed.
953 The _const form can be used when the Data object is const, however this will only work for
954 Data which is not Lazy.
955
956 For Data which contain no samples (or tagged Data for which no tags in use have a value)
957 a large negative value is returned.
958 */
959 ESCRIPT_DLL_API
960 double
961 sup();
962
963 ESCRIPT_DLL_API
964 double
965 sup_const() const;
966
967
968 /**
969 \brief
970 Return the minimum value of this Data object.
971
972 The method is not const because lazy data needs to be expanded before inf can be computed.
973 The _const form can be used when the Data object is const, however this will only work for
974 Data which is not Lazy.
975
976 For Data which contain no samples (or tagged Data for which no tags in use have a value)
977 a large positive value is returned.
978 */
979 ESCRIPT_DLL_API
980 double
981 inf();
982
983 ESCRIPT_DLL_API
984 double
985 inf_const() const;
986
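// Illustrative sketch (not part of the original header): global reductions. The
// non-const forms are used here because the Data object may be lazy.
//
//     void reportRange(escript::Data& d)
//     {
//         double lo  = d.inf();    // smallest value over all data points
//         double hi  = d.sup();    // largest value over all data points
//         double amp = d.Lsup();   // largest absolute value over all data points
//         std::ostringstream os;
//         os << "inf=" << lo << " sup=" << hi << " Lsup=" << amp;
//     }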
987
988
989 /**
990 \brief
991 Return the absolute value of each data point of this Data object.
992 *
993 */
994 ESCRIPT_DLL_API
995 Data
996 abs() const;
997
998 /**
999 \brief
1000 Return the maximum value of each data point of this Data object.
1001 *
1002 */
1003 ESCRIPT_DLL_API
1004 Data
1005 maxval() const;
1006
1007 /**
1008 \brief
1009 Return the minimum value of each data point of this Data object.
1010 *
1011 */
1012 ESCRIPT_DLL_API
1013 Data
1014 minval() const;
1015
1016 /**
1017 \brief
1018 Return the (sample number, data-point number) of the data point with
1019 the minimum component value in this Data object.
1020 \note If you are working in python, please consider using Locator
1021 instead of manually manipulating process and point IDs.
1022 */
1023 ESCRIPT_DLL_API
1024 const boost::python::tuple
1025 minGlobalDataPoint() const;
1026
1027 /**
1028 \brief
1029 Return the (sample number, data-point number) of the data point with
1030 the maximum component value in this Data object.
1031 \note If you are working in python, please consider using Locator
1032 instead of manually manipulating process and point IDs.
1033 */
1034 ESCRIPT_DLL_API
1035 const boost::python::tuple
1036 maxGlobalDataPoint() const;
1037
1038
1039
1040 /**
1041 \brief
1042 Return the sign of each data point of this Data object.
1043 -1 for negative values, zero for zero values, 1 for positive values.
1044 *
1045 */
1046 ESCRIPT_DLL_API
1047 Data
1048 sign() const;
1049
1050 /**
1051 \brief
1052 Return the symmetric part of a matrix which is half the matrix plus its transpose.
1053 *
1054 */
1055 ESCRIPT_DLL_API
1056 Data
1057 symmetric() const;
1058
1059 /**
1060 \brief
1061 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
1062 *
1063 */
1064 ESCRIPT_DLL_API
1065 Data
1066 nonsymmetric() const;
1067
1068 /**
1069 \brief
1070 Return the trace of a matrix
1071 *
1072 */
1073 ESCRIPT_DLL_API
1074 Data
1075 trace(int axis_offset) const;
1076
1077 /**
1078 \brief
1079 Transpose each data point of this Data object around the given axis.
1080 *
1081 */
1082 ESCRIPT_DLL_API
1083 Data
1084 transpose(int axis_offset) const;
1085
1086 /**
1087 \brief
1088 Return the eigenvalues of the symmetric part at each data point of this Data object, in increasing order.
1089 Currently this function is restricted to rank 2, square shape, and dimension 3.
1090 *
1091 */
1092 ESCRIPT_DLL_API
1093 Data
1094 eigenvalues() const;
1095
1096 /**
1097 \brief
1098 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1099 The eigenvalues are ordered by increasing size, where eigenvalues with relative difference less than
1100 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1101 first non-zero entry is positive.
1102 Currently this function is restricted to rank 2, square shape, and dimension 3
1103 *
1104 */
1105 ESCRIPT_DLL_API
1106 const boost::python::tuple
1107 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1108
1109 /**
1110 \brief
1111 swaps the components axis0 and axis1
1112 *
1113 */
1114 ESCRIPT_DLL_API
1115 Data
1116 swapaxes(const int axis0, const int axis1) const;
1117
1118 /**
1119 \brief
1120 Return the error function erf of each data point of this Data object.
1121 *
1122 */
1123 ESCRIPT_DLL_API
1124 Data
1125 erf() const;
1126
1127 /**
1128 \brief
1129 Return the sin of each data point of this Data object.
1130 *
1131 */
1132 ESCRIPT_DLL_API
1133 Data
1134 sin() const;
1135
1136 /**
1137 \brief
1138 Return the cos of each data point of this Data object.
1139 *
1140 */
1141 ESCRIPT_DLL_API
1142 Data
1143 cos() const;
1144
1145 /**
1146 \brief
1147 Return the tan of each data point of this Data object.
1148 *
1149 */
1150 ESCRIPT_DLL_API
1151 Data
1152 tan() const;
1153
1154 /**
1155 \brief
1156 Return the asin of each data point of this Data object.
1157 *
1158 */
1159 ESCRIPT_DLL_API
1160 Data
1161 asin() const;
1162
1163 /**
1164 \brief
1165 Return the acos of each data point of this Data object.
1166 *
1167 */
1168 ESCRIPT_DLL_API
1169 Data
1170 acos() const;
1171
1172 /**
1173 \brief
1174 Return the atan of each data point of this Data object.
1175 *
1176 */
1177 ESCRIPT_DLL_API
1178 Data
1179 atan() const;
1180
1181 /**
1182 \brief
1183 Return the sinh of each data point of this Data object.
1184 *
1185 */
1186 ESCRIPT_DLL_API
1187 Data
1188 sinh() const;
1189
1190 /**
1191 \brief
1192 Return the cosh of each data point of this Data object.
1193 *
1194 */
1195 ESCRIPT_DLL_API
1196 Data
1197 cosh() const;
1198
1199 /**
1200 \brief
1201 Return the tanh of each data point of this Data object.
1202 *
1203 */
1204 ESCRIPT_DLL_API
1205 Data
1206 tanh() const;
1207
1208 /**
1209 \brief
1210 Return the asinh of each data point of this Data object.
1211 *
1212 */
1213 ESCRIPT_DLL_API
1214 Data
1215 asinh() const;
1216
1217 /**
1218 \brief
1219 Return the acosh of each data point of this Data object.
1220 *
1221 */
1222 ESCRIPT_DLL_API
1223 Data
1224 acosh() const;
1225
1226 /**
1227 \brief
1228 Return the atanh of each data point of this Data object.
1229 *
1230 */
1231 ESCRIPT_DLL_API
1232 Data
1233 atanh() const;
1234
1235 /**
1236 \brief
1237 Return the log to base 10 of each data point of this Data object.
1238 *
1239 */
1240 ESCRIPT_DLL_API
1241 Data
1242 log10() const;
1243
1244 /**
1245 \brief
1246 Return the natural log of each data point of this Data object.
1247 *
1248 */
1249 ESCRIPT_DLL_API
1250 Data
1251 log() const;
1252
1253 /**
1254 \brief
1255 Return the exponential function of each data point of this Data object.
1256 *
1257 */
1258 ESCRIPT_DLL_API
1259 Data
1260 exp() const;
1261
1262 /**
1263 \brief
1264 Return the square root of each data point of this Data object.
1265 *
1266 */
1267 ESCRIPT_DLL_API
1268 Data
1269 sqrt() const;
1270
1271 /**
1272 \brief
1273 Return the negation of each data point of this Data object.
1274 *
1275 */
1276 ESCRIPT_DLL_API
1277 Data
1278 neg() const;
1279
1280 /**
1281 \brief
1282 Return the identity of each data point of this Data object.
1283 Simply returns this object unmodified.
1284 *
1285 */
1286 ESCRIPT_DLL_API
1287 Data
1288 pos() const;
1289
1290 /**
1291 \brief
1292 Return the given power of each data point of this Data object.
1293
1294 \param right Input - the power to raise the object to.
1295 *
1296 */
1297 ESCRIPT_DLL_API
1298 Data
1299 powD(const Data& right) const;
1300
1301 /**
1302 \brief
1303 Return the given power of each data point of this Data object, where the power is supplied as a boost python object.
1304
1305 \param right Input - the power to raise the object to.
1306 *
1307 */
1308 ESCRIPT_DLL_API
1309 Data
1310 powO(const boost::python::object& right) const;
1311
1312 /**
1313 \brief
1314 Return the given bases (a boost python object) raised to the power of each data point of this Data object.
1315
1316 \param left Input - the bases
1317 *
1318 */
1319
1320 ESCRIPT_DLL_API
1321 Data
1322 rpowO(const boost::python::object& left) const;
1323
1324 /**
1325 \brief
1326 Overloaded operator +=
1327 \param right - Input - The right hand side.
1328 *
1329 */
1330 ESCRIPT_DLL_API
1331 Data& operator+=(const Data& right);
1332 ESCRIPT_DLL_API
1333 Data& operator+=(const boost::python::object& right);
1334
1335 ESCRIPT_DLL_API
1336 Data& operator=(const Data& other);
1337
1338 /**
1339 \brief
1340 Overloaded operator -=
1341 \param right - Input - The right hand side.
1342 *
1343 */
1344 ESCRIPT_DLL_API
1345 Data& operator-=(const Data& right);
1346 ESCRIPT_DLL_API
1347 Data& operator-=(const boost::python::object& right);
1348
1349 /**
1350 \brief
1351 Overloaded operator *=
1352 \param right - Input - The right hand side.
1353 *
1354 */
1355 ESCRIPT_DLL_API
1356 Data& operator*=(const Data& right);
1357 ESCRIPT_DLL_API
1358 Data& operator*=(const boost::python::object& right);
1359
1360 /**
1361 \brief
1362 Overloaded operator /=
1363 \param right - Input - The right hand side.
1364 *
1365 */
1366 ESCRIPT_DLL_API
1367 Data& operator/=(const Data& right);
1368 ESCRIPT_DLL_API
1369 Data& operator/=(const boost::python::object& right);
1370
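// Illustrative sketch (not part of the original header): in-place arithmetic between two
// Data objects. operandCheck() throws if the operands are incompatible.
//
//     void accumulate(escript::Data& y, const escript::Data& x)
//     {
//         y += x;   // elementwise addition
//         y *= x;   // elementwise multiplication
//         y /= x;   // elementwise division
//     }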
1371 /**
1372 \brief
1373 Newer style division operator for python
1374 */
1375 ESCRIPT_DLL_API
1376 Data truedivD(const Data& right);
1377
1378 /**
1379 \brief
1380 Newer style division operator for python
1381 */
1382 ESCRIPT_DLL_API
1383 Data truedivO(const boost::python::object& right);
1384
1385 /**
1386 \brief
1387 Newer style division operator for python
1388 */
1389 ESCRIPT_DLL_API
1390 Data rtruedivO(const boost::python::object& left);
1391
1392 /**
1393 \brief
1394 wrapper for python add operation
1395 */
1396 ESCRIPT_DLL_API
1397 boost::python::object __add__(const boost::python::object& right);
1398
1399
1400 /**
1401 \brief
1402 wrapper for python subtract operation
1403 */
1404 ESCRIPT_DLL_API
1405 boost::python::object __sub__(const boost::python::object& right);
1406
1407 /**
1408 \brief
1409 wrapper for python reverse subtract operation
1410 */
1411 ESCRIPT_DLL_API
1412 boost::python::object __rsub__(const boost::python::object& right);
1413
1414 /**
1415 \brief
1416 wrapper for python multiply operation
1417 */
1418 ESCRIPT_DLL_API
1419 boost::python::object __mul__(const boost::python::object& right);
1420
1421 /**
1422 \brief
1423 wrapper for python divide operation
1424 */
1425 ESCRIPT_DLL_API
1426 boost::python::object __div__(const boost::python::object& right);
1427
1428 /**
1429 \brief
1430 wrapper for python reverse divide operation
1431 */
1432 ESCRIPT_DLL_API
1433 boost::python::object __rdiv__(const boost::python::object& right);
1434
1435 /**
1436 \brief Return the matrix inverse of each data point.
1437 */
1438 ESCRIPT_DLL_API
1439 Data
1440 matrixInverse() const;
1441
1442 /**
1443 \brief
1444 Returns true if this can be interpolated to functionspace.
1445 */
1446 ESCRIPT_DLL_API
1447 bool
1448 probeInterpolation(const FunctionSpace& functionspace) const;
1449
1450 /**
1451 Data object slicing methods.
1452 */
1453
1454 /**
1455 \brief
1456 Returns a slice from this Data object.
1457
1458 Description:
1459 Implements the [] get operator in python.
1460 Calls getSlice.
1461
1462 \param key - Input - python slice tuple specifying
1463 slice to return.
1464 */
1465 ESCRIPT_DLL_API
1466 Data
1467 getItem(const boost::python::object& key) const;
1468
1469 /**
1470 \brief
1471 Copies slice from value into this Data object.
1472
1473 Implements the [] set operator in python.
1474 Calls setSlice.
1475
1476 \param key - Input - python slice tuple specifying
1477 slice to copy from value.
1478 \param value - Input - Data object to copy from.
1479 */
1480 ESCRIPT_DLL_API
1481 void
1482 setItemD(const boost::python::object& key,
1483 const Data& value);
1484
1485 ESCRIPT_DLL_API
1486 void
1487 setItemO(const boost::python::object& key,
1488 const boost::python::object& value);
1489
1490 // The following public methods should be treated as private.
1491
1492 /**
1493 \brief
1494 Perform the given unary operation on every element of every data point in
1495 this Data object.
1496 */
1497 template <class UnaryFunction>
1498 ESCRIPT_DLL_API
1499 inline
1500 void
1501 unaryOp2(UnaryFunction operation);
1502
1503 /**
1504 \brief
1505 Return a Data object containing the specified slice of
1506 this Data object.
1507 \param region - Input - Region to copy.
1508 *
1509 */
1510 ESCRIPT_DLL_API
1511 Data
1512 getSlice(const DataTypes::RegionType& region) const;
1513
1514 /**
1515 \brief
1516 Copy the specified slice from the given value into this
1517 Data object.
1518 \param value - Input - Data to copy from.
1519 \param region - Input - Region to copy.
1520 *
1521 */
1522 ESCRIPT_DLL_API
1523 void
1524 setSlice(const Data& value,
1525 const DataTypes::RegionType& region);
1526
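// Illustrative sketch (not part of the original header), assuming DataTypes::RegionType
// behaves like a vector of (begin,end) index pairs, one pair per rank of the data point
// shape.
//
//     escript::Data firstComponent(const escript::Data& v)
//     {
//         escript::DataTypes::RegionType region;
//         region.push_back(std::make_pair(0, 1));   // keep index 0 of a rank-1 data point
//         return v.getSlice(region);
//     }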
1527 /**
1528 \brief
1529 print the data values to stdout. Used for debugging
1530 */
1531 ESCRIPT_DLL_API
1532 void
1533 print(void);
1534
1535 /**
1536 \brief
1537 return the MPI rank number of the local data
1538 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1539 is returned
1540 */
1541 ESCRIPT_DLL_API
1542 int
1543 get_MPIRank(void) const;
1544
1545 /**
1546 \brief
1547 return the number of MPI processes for the local data
1548 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1549 is returned
1550 */
1551 ESCRIPT_DLL_API
1552 int
1553 get_MPISize(void) const;
1554
1555 /**
1556 \brief
1557 return the MPI communicator of the local data
1558 MPI_COMM_WORLD is assumed and returned.
1559 */
1560 ESCRIPT_DLL_API
1561 MPI_Comm
1562 get_MPIComm(void) const;
1563
1564 /**
1565 \brief
1566 return the object produced by the factory, which is a DataConstant or DataExpanded
1567 TODO Ownership of this object should be explained in doco.
1568 */
1569 ESCRIPT_DLL_API
1570 DataAbstract*
1571 borrowData(void) const;
1572
1573 ESCRIPT_DLL_API
1574 DataAbstract_ptr
1575 borrowDataPtr(void) const;
1576
1577 ESCRIPT_DLL_API
1578 DataReady_ptr
1579 borrowReadyPtr(void) const;
1580
1581
1582
1583 /**
1584 \brief
1585 Return a pointer to the beginning of the datapoint at the specified offset.
1586 TODO Eventually these should be inlined.
1587 \param i - position(offset) in the underlying datastructure
1588 */
1589
1590 ESCRIPT_DLL_API
1591 DataTypes::ValueType::const_reference
1592 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1593
1594
1595 ESCRIPT_DLL_API
1596 DataTypes::ValueType::reference
1597 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1598
1599
1600 protected:
1601
1602 private:
1603
1604 template <class BinaryOp>
1605 double
1606 #ifdef ESYS_MPI
1607 lazyAlgWorker(double init, MPI_Op mpiop_type);
1608 #else
1609 lazyAlgWorker(double init);
1610 #endif
1611
1612 double
1613 LsupWorker() const;
1614
1615 double
1616 supWorker() const;
1617
1618 double
1619 infWorker() const;
1620
1621 boost::python::object
1622 integrateWorker() const;
1623
1624 void
1625 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1626
1627 void
1628 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1629
1630 // For internal use in Data.cpp only!
1631 // other uses should call the main entry points and allow laziness
1632 Data
1633 minval_nonlazy() const;
1634
1635 // For internal use in Data.cpp only!
1636 Data
1637 maxval_nonlazy() const;
1638
1639
1640 /**
1641 \brief
1642 Check *this and the right operand are compatible. Throws
1643 an exception if they aren't.
1644 \param right - Input - The right hand side.
1645 */
1646 inline
1647 void
1648 operandCheck(const Data& right) const
1649 {
1650 return m_data->operandCheck(*(right.m_data.get()));
1651 }
1652
1653 /**
1654 \brief
1655 Perform the specified reduction algorithm on every element of every data point in
1656 this Data object according to the given function and return the single value result.
1657 */
1658 template <class BinaryFunction>
1659 inline
1660 double
1661 algorithm(BinaryFunction operation,
1662 double initial_value) const;
1663
1664 /**
1665 \brief
1666 Reduce each data-point in this Data object using the given operation. Return a Data
1667 object with the same number of data-points, but with each data-point containing only
1668 one value - the result of the reduction operation on the corresponding data-point in
1669 this Data object
1670 */
1671 template <class BinaryFunction>
1672 inline
1673 Data
1674 dp_algorithm(BinaryFunction operation,
1675 double initial_value) const;
1676
1677 /**
1678 \brief
1679 Perform the given binary operation on all of the data's elements.
1680 The underlying type of the right hand side (right) determines the final
1681 type of *this after the operation. For example if the right hand side
1682 is expanded *this will be expanded if necessary.
1683 RHS is a Data object.
1684 */
1685 template <class BinaryFunction>
1686 inline
1687 void
1688 binaryOp(const Data& right,
1689 BinaryFunction operation);
1690
1691 /**
1692 \brief
1693 Convert the data type of the RHS to match this.
1694 \param right - Input - data type to match.
1695 */
1696 void
1697 typeMatchLeft(Data& right) const;
1698
1699 /**
1700 \brief
1701 Convert the data type of this to match the RHS.
1702 \param right - Input - data type to match.
1703 */
1704 void
1705 typeMatchRight(const Data& right);
1706
1707 /**
1708 \brief
1709 Construct a Data object of the appropriate type.
1710 */
1711
1712 void
1713 initialise(const DataTypes::ValueType& value,
1714 const DataTypes::ShapeType& shape,
1715 const FunctionSpace& what,
1716 bool expanded);
1717
1718 void
1719 initialise(const WrappedArray& value,
1720 const FunctionSpace& what,
1721 bool expanded);
1722
1723 void
1724 initialise(const double value,
1725 const DataTypes::ShapeType& shape,
1726 const FunctionSpace& what,
1727 bool expanded);
1728
1729 //
1730 // flag to protect the data object against any update
1731 bool m_protected;
1732 mutable bool m_shared;
1733 bool m_lazy;
1734
1735 //
1736 // pointer to the actual data object
1737 // boost::shared_ptr<DataAbstract> m_data;
1738 DataAbstract_ptr m_data;
1739
1740 // If possible please use getReadyPtr instead.
1741 // But see warning below.
1742 const DataReady*
1743 getReady() const;
1744
1745 DataReady*
1746 getReady();
1747
1748
1749 // Be wary of using this for local operations since it (temporarily) increases reference count.
1750 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1751 // getReady() instead
1752 DataReady_ptr
1753 getReadyPtr();
1754
1755 const_DataReady_ptr
1756 getReadyPtr() const;
1757
1758
1759 /**
1760 \brief Update the Data's shared flag
1761 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1762 For internal use only.
1763 */
1764 void updateShareStatus(bool nowshared) const
1765 {
1766 m_shared=nowshared; // m_shared is mutable
1767 }
1768
1769 // In the isShared() method below:
1770 // A problem would occur if m_data (the address pointed to) were being modified
1771 // while the call m_data->is_shared is being executed.
1772 //
1773 // Q: So why do I think this code can be thread safe/correct?
1774 // A: We need to make some assumptions.
1775 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1776 // 2. We assume that no constructions or assignments which will share previously unshared data
1777 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1778 //
1779 // This means that the only transition we need to consider, is when a previously shared object is
1780 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1781 // In those cases the m_shared flag changes to false after m_data has completed changing.
1782 // For any threads executing before the flag switches they will assume the object is still shared.
1783 bool isShared() const
1784 {
1785 return m_shared;
1786 /* if (m_shared) return true;
1787 if (m_data->isShared())
1788 {
1789 updateShareStatus(true);
1790 return true;
1791 }
1792 return false;*/
1793 }
1794
1795 void forceResolve()
1796 {
1797 if (isLazy())
1798 {
1799 #ifdef _OPENMP
1800 if (omp_in_parallel())
1801 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1802 throw DataException("Please do not call forceResolve() in a parallel region.");
1803 }
1804 #endif
1805 resolve();
1806 }
1807 }
1808
1809 /**
1810 \brief if another object is sharing out member data make a copy to work with instead.
1811 This code should only be called from single threaded sections of code.
1812 */
1813 void exclusiveWrite()
1814 {
1815 #ifdef _OPENMP
1816 if (omp_in_parallel())
1817 {
1818 // *((int*)0)=17;
1819 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1820 }
1821 #endif
1822 forceResolve();
1823 if (isShared())
1824 {
1825 DataAbstract* t=m_data->deepCopy();
1826 set_m_data(DataAbstract_ptr(t));
1827 }
1828 }
1829
1830 /**
1831 \brief checks if caller can have exclusive write to the object
1832 */
1833 void checkExclusiveWrite()
1834 {
1835 if (isLazy() || isShared())
1836 {
1837 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1838 }
1839 }
1840
1841 /**
1842 \brief Modify the data abstract hosted by this Data object
1843 For internal use only.
1844 Passing a pointer to null is permitted (do this in the destructor)
1845 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1846 */
1847 void set_m_data(DataAbstract_ptr p);
1848
1849 friend class DataAbstract; // To allow calls to updateShareStatus
1850 friend class TestDomain; // so its getX will work quickly
1851 #ifdef IKNOWWHATIMDOING
1852 friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1853 #endif
1854 friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1855 friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed);
1856
1857 };
1858
1859
1860 #ifdef IKNOWWHATIMDOING
1861 ESCRIPT_DLL_API
1862 Data
1863 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1864 #endif
1865
1866
1867 ESCRIPT_DLL_API
1868 Data
1869 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1870
1871
1872
1873 /**
1874 \brief Create a new Expanded Data object filled with (not very) random data.
1875 */
1876 ESCRIPT_DLL_API
1877 Data randomData(const boost::python::tuple& shape,
1878 const FunctionSpace& what,
1879 long seed);
1880
1881
1882 } // end namespace escript
1883
1884
1885 // No, this is not supposed to be at the top of the file
1886 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1887 // so that I can dynamic cast between them below.
1888 #include "DataReady.h"
1889 #include "DataLazy.h"
1890
1891 namespace escript
1892 {
1893
1894 inline
1895 const DataReady*
1896 Data::getReady() const
1897 {
1898 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1899 EsysAssert((dr!=0), "Error - casting to DataReady.");
1900 return dr;
1901 }
1902
1903 inline
1904 DataReady*
1905 Data::getReady()
1906 {
1907 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1908 EsysAssert((dr!=0), "Error - casting to DataReady.");
1909 return dr;
1910 }
1911
1912 // Be wary of using this for local operations since it (temporarily) increases reference count.
1913 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1914 // getReady() instead
1915 inline
1916 DataReady_ptr
1917 Data::getReadyPtr()
1918 {
1919 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1920 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1921 return dr;
1922 }
1923
1924
1925 inline
1926 const_DataReady_ptr
1927 Data::getReadyPtr() const
1928 {
1929 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1930 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1931 return dr;
1932 }
1933
1934 inline
1935 DataAbstract::ValueType::value_type*
1936 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1937 {
1938 if (isLazy())
1939 {
1940 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1941 }
1942 return getReady()->getSampleDataRW(sampleNo);
1943 }
1944
1945 inline
1946 const DataAbstract::ValueType::value_type*
1947 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo)
1948 {
1949 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1950 if (l!=0)
1951 {
1952 size_t offset=0;
1953 const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
1954 return &((*res)[offset]);
1955 }
1956 return getReady()->getSampleDataRO(sampleNo);
1957 }
1958
1959
1960
1961 /**
1962 Modify a filename for MPI parallel output to multiple files
1963 */
1964 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1965
1966 /**
1967 Binary Data object operators.
1968 */
1969 inline double rpow(double x,double y)
1970 {
1971 return pow(y,x);
1972 }
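// For example, rpow(2.0, 3.0) evaluates pow(3.0, 2.0), i.e. 9.0 (the argument order is
// deliberately swapped for reversed power operations).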
1973
1974 /**
1975 \brief
1976 Operator+
1977 Takes two Data objects.
1978 */
1979 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1980
1981 /**
1982 \brief
1983 Operator-
1984 Takes two Data objects.
1985 */
1986 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1987
1988 /**
1989 \brief
1990 Operator*
1991 Takes two Data objects.
1992 */
1993 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1994
1995 /**
1996 \brief
1997 Operator/
1998 Takes two Data objects.
1999 */
2000 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2001
2002 /**
2003 \brief
2004 Operator+
2005 Takes LHS Data object and RHS python::object.
2006 python::object must be convertible to Data type.
2007 */
2008 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2009
2010 /**
2011 \brief
2012 Operator-
2013 Takes LHS Data object and RHS python::object.
2014 python::object must be convertible to Data type.
2015 */
2016 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2017
2018 /**
2019 \brief
2020 Operator*
2021 Takes LHS Data object and RHS python::object.
2022 python::object must be convertible to Data type.
2023 */
2024 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2025
2026 /**
2027 \brief
2028 Operator/
2029 Takes LHS Data object and RHS python::object.
2030 python::object must be convertible to Data type.
2031 */
2032 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2033
2034 /**
2035 \brief
2036 Operator+
2037 Takes LHS python::object and RHS Data object.
2038 python::object must be convertible to Data type.
2039 */
2040 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2041
2042 /**
2043 \brief
2044 Operator-
2045 Takes LHS python::object and RHS Data object.
2046 python::object must be convertible to Data type.
2047 */
2048 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2049
2050 /**
2051 \brief
2052 Operator*
2053 Takes LHS python::object and RHS Data object.
2054 python::object must be convertible to Data type.
2055 */
2056 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2057
2058 /**
2059 \brief
2060 Operator/
2061 Takes LHS python::object and RHS Data object.
2062 python::object must be convertible to Data type.
2063 */
2064 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2065
2066
2067
2068 /**
2069 \brief
2070 Output operator
2071 */
2072 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2073
2074 /**
2075 \brief
2076 Compute a tensor product of two Data objects
2077 \param arg_0 - Input - Data object
2078 \param arg_1 - Input - Data object
2079 \param axis_offset - Input - axis offset
2080 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
2081 */
2082 ESCRIPT_DLL_API
2083 Data
2084 C_GeneralTensorProduct(Data& arg_0,
2085 Data& arg_1,
2086 int axis_offset=0,
2087 int transpose=0);
2088
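// Illustrative sketch (not part of the original header): a matrix-vector style
// contraction via the general tensor product. The choice axis_offset=1 (contract one
// axis) with transpose=0 is an assumption here, not taken from this file.
//
//     escript::Data matVec(escript::Data& A, escript::Data& x)
//     {
//         return escript::C_GeneralTensorProduct(A, x, 1, 0);
//     }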
2089 /**
2090 \brief
2091 Operator/
2092 Takes RHS Data object.
2093 */
2094 inline
2095 Data
2096 Data::truedivD(const Data& right)
2097 {
2098 return *this / right;
2099 }
2100
2101 /**
2102 \brief
2103 Operator/
2104 Takes RHS python::object.
2105 */
2106 inline
2107 Data
2108 Data::truedivO(const boost::python::object& right)
2109 {
2110 Data tmp(right, getFunctionSpace(), false);
2111 return truedivD(tmp);
2112 }
2113
2114 /**
2115 \brief
2116 Operator/
2117 Takes LHS python::object.
2118 */
2119 inline
2120 Data
2121 Data::rtruedivO(const boost::python::object& left)
2122 {
2123 Data tmp(left, getFunctionSpace(), false);
2124 return tmp.truedivD(*this);
2125 }
2126
2127 /**
2128 \brief
2129 Perform the given binary operation with this and right as operands.
2130 Right is a Data object.
2131 */
2132 template <class BinaryFunction>
2133 inline
2134 void
2135 Data::binaryOp(const Data& right,
2136 BinaryFunction operation)
2137 {
2138 //
2139 // updating a rank-zero object with a higher-rank object is not supported
2140 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2141 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2142 }
2143
2144 if (isLazy() || right.isLazy())
2145 {
2146 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2147 }
2148 //
2149 // initially make the temporary a shallow copy
2150 Data tempRight(right);
2151
2152 if (getFunctionSpace()!=right.getFunctionSpace()) {
2153 if (right.probeInterpolation(getFunctionSpace())) {
2154 //
2155 // an interpolation is required so create a new Data
2156 tempRight=Data(right,this->getFunctionSpace());
2157 } else if (probeInterpolation(right.getFunctionSpace())) {
2158 //
2159 // interpolate onto the RHS function space
2160 Data tempLeft(*this,right.getFunctionSpace());
2161 // m_data=tempLeft.m_data;
2162 set_m_data(tempLeft.m_data);
2163 }
2164 }
2165 operandCheck(tempRight);
2166 //
2167 // ensure this has the right type for the RHS
2168 typeMatchRight(tempRight);
2169 //
2170 // Need to cast to the concrete types so that the correct binaryOp
2171 // is called.
2172 if (isExpanded()) {
2173 //
2174 // Expanded data will be done in parallel, the right hand side can be
2175 // of any data type
2176 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2177 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2178 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2179 } else if (isTagged()) {
2180 //
2181 // Tagged data is operated on serially, the right hand side can be
2182 // either DataConstant or DataTagged
2183 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2184 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2185 if (right.isTagged()) {
2186 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2187 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2188 escript::binaryOp(*leftC,*rightC,operation);
2189 } else {
2190 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2191 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2192 escript::binaryOp(*leftC,*rightC,operation);
2193 }
2194 } else if (isConstant()) {
2195 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2196 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2197 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2198 escript::binaryOp(*leftC,*rightC,operation);
2199 }
2200 }
2201
2202 /**
2203 \brief
2204 Perform the given Data object reduction algorithm on this and return the result.
2205 Given operation combines each element of each data point, thus argument
2206 object (*this) is a rank n Data object, and returned object is a scalar.
2207 Calls escript::algorithm.
2208 */
2209 template <class BinaryFunction>
2210 inline
2211 double
2212 Data::algorithm(BinaryFunction operation, double initial_value) const
2213 {
2214 if (isExpanded()) {
2215 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2216 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2217 return escript::algorithm(*leftC,operation,initial_value);
2218 } else if (isTagged()) {
2219 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2220 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2221 return escript::algorithm(*leftC,operation,initial_value);
2222 } else if (isConstant()) {
2223 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2224 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2225 return escript::algorithm(*leftC,operation,initial_value);
2226 } else if (isEmpty()) {
2227 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2228 } else if (isLazy()) {
2229 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2230 } else {
2231 throw DataException("Error - Data encapsulates an unknown type.");
2232 }
2233 }
2234
2235 /**
2236 \brief
2237 Perform the given data point reduction algorithm on data and return the result.
2238 Given operation combines each element within each data point into a scalar,
2239 thus argument object is a rank n Data object, and returned object is a
2240 rank 0 Data object.
2241 Calls escript::dp_algorithm.
2242 */
2243 template <class BinaryFunction>
2244 inline
2245 Data
2246 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2247 {
2248 if (isEmpty()) {
2249 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2250 }
2251 else if (isExpanded()) {
2252 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2253 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2254 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2255 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2256 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2257 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2258 return result;
2259 }
2260 else if (isTagged()) {
2261 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2262 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2263 DataTypes::ValueType defval(1);
2264 defval[0]=0;
2265 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2266 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2267 return Data(resultT); // note: the Data object now owns the resultT pointer
2268 }
2269 else if (isConstant()) {
2270 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2271 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2272 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2273 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2274 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2275 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2276 return result;
2277 } else if (isLazy()) {
2278 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2279 } else {
2280 throw DataException("Error - Data encapsulates an unknown type.");
2281 }
2282 }
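// Editorial usage sketch (not part of the original header): unlike algorithm(), which
// collapses everything to a single double, dp_algorithm reduces within each data point
// and keeps the point structure, returning a rank-0 Data over the same function space.
// Assuming the same illustrative FMax functor as above:
//
//   Data per_point_max = d.dp_algorithm(FMax(), -std::numeric_limits<double>::max());
//   // per_point_max holds one scalar per data point of d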
2283
2284 /**
2285 \brief
2286 Compute a tensor operation with two Data objects
2287 \param arg_0 - Input - Data object
2288 \param arg_1 - Input - Data object
2289 \param operation - Input - Binary op functor
2290 */
2291 template <typename BinaryFunction>
2292 inline
2293 Data
2294 C_TensorBinaryOperation(Data const &arg_0,
2295 Data const &arg_1,
2296 BinaryFunction operation)
2297 {
2298 if (arg_0.isEmpty() || arg_1.isEmpty())
2299 {
2300 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2301 }
2302 if (arg_0.isLazy() || arg_1.isLazy())
2303 {
2304 throw DataException("Error - Operations not permitted on lazy data.");
2305 }
2306 // Interpolate if necessary and find an appropriate function space
2307 Data arg_0_Z, arg_1_Z;
2308 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2309 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2310 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2311 arg_1_Z = Data(arg_1);
2312 }
2313 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2314 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2315 arg_0_Z =Data(arg_0);
2316 }
2317 else {
2318 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2319 }
2320 } else {
2321 arg_0_Z = Data(arg_0);
2322 arg_1_Z = Data(arg_1);
2323 }
2324 // Get rank and shape of inputs
2325 int rank0 = arg_0_Z.getDataPointRank();
2326 int rank1 = arg_1_Z.getDataPointRank();
2327 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2328 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2329 int size0 = arg_0_Z.getDataPointSize();
2330 int size1 = arg_1_Z.getDataPointSize();
2331 // Declare output Data object
2332 Data res;
2333
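  // Dispatch on the data point shapes: when the shapes match the operation is applied
  // component-wise; when exactly one argument is rank 0 its single value is broadcast
  // across every component of the other argument; any other combination is rejected
  // with a DataException below.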
2334 if (shape0 == shape1) {
2335 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2336 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2337 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2338 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2339 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2340
2341 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2342 }
2343 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2344
2345 // Prepare the DataConstant input
2346 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2347
2348 // Borrow DataTagged input from Data object
2349 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2350
2351 // Prepare a DataTagged output (tmp_2)
2352 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2353 res.tag();
2354 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2355
2356 // Prepare offset into DataConstant
2357 int offset_0 = tmp_0->getPointOffset(0,0);
2358 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2359
2360 // Get the pointers to the actual data
2361 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2362 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2363
2364 // Compute a result for the default
2365 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2366 // Compute a result for each tag
2367 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2368 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2369 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2370 tmp_2->addTag(i->first);
2371 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2372 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2373
2374 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2375 }
2376
2377 }
2378 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2379 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2380 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2381 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2382 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2383
2384 int sampleNo_1,dataPointNo_1;
2385 int numSamples_1 = arg_1_Z.getNumSamples();
2386 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2387 int offset_0 = tmp_0->getPointOffset(0,0);
2388 res.requireWrite();
2389 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2390 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2391 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2392 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2393 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2394 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2395 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2396 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2397 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2398 }
2399 }
2400
2401 }
2402 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2403 // Borrow DataTagged input from Data object
2404 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2405
2406 // Prepare the DataConstant input
2407 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2408
2409 // Prepare a DataTagged output (tmp_2)
2410 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2411 res.tag();
2412 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2413
2414 // Prepare offset into DataConstant
2415 int offset_1 = tmp_1->getPointOffset(0,0);
2416
2417 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2418 // Get the pointers to the actual data
2419 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2420 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2421 // Compute a result for the default
2422 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2423 // Compute a result for each tag
2424 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2425 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2426 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2427 tmp_2->addTag(i->first);
2428 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2429 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2430 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2431 }
2432
2433 }
2434 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2435 // Borrow DataTagged input from Data object
2436 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2437
2438 // Borrow DataTagged input from Data object
2439 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2440
2441 // Prepare a DataTagged output (tmp_2)
2442 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2443 res.tag(); // DataTagged output
2444 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2445
2446 // Get the pointers to the actual data
2447 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2448 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2449 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2450
2451 // Compute a result for the default
2452 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2453 // Merge the tags
2454 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2455 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2456 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2457 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2458 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2459 }
2460 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2461 tmp_2->addTag(i->first);
2462 }
2463 // Compute a result for each tag
2464 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2465 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2466
2467 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2468 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2469 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2470
2471 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2472 }
2473
2474 }
2475 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2476 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2477 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2478 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2479 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2480 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2481
2482 int sampleNo_0,dataPointNo_0;
2483 int numSamples_0 = arg_0_Z.getNumSamples();
2484 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2485 res.requireWrite();
2486 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2487 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2488 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2489 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2490 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2491 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2492 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2493 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2494 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2495 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2496 }
2497 }
2498
2499 }
2500 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2501 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2502 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2503 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2504 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2505
2506 int sampleNo_0,dataPointNo_0;
2507 int numSamples_0 = arg_0_Z.getNumSamples();
2508 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2509 int offset_1 = tmp_1->getPointOffset(0,0);
2510 res.requireWrite();
2511 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2512 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2513 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2514 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2515 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2516
2517 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2518 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2519 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2520
2521
2522 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2523 }
2524 }
2525
2526 }
2527 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2528 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2529 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2530 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2531 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2532 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2533
2534 int sampleNo_0,dataPointNo_0;
2535 int numSamples_0 = arg_0_Z.getNumSamples();
2536 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2537 res.requireWrite();
2538 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2539 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2540 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2541 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2542 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2543 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2544 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2545 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2546 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2547 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2548 }
2549 }
2550
2551 }
2552 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2553 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2554 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2555 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2556 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2557 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2558
2559 int sampleNo_0,dataPointNo_0;
2560 int numSamples_0 = arg_0_Z.getNumSamples();
2561 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2562 res.requireWrite();
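 // Both inputs are expanded, so the data points of a sample occupy one contiguous
 // block; the commented-out per-data-point loop is collapsed into a single call
 // per sample covering size0*numDataPointsPerSample_0 values.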
2563 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2564 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2565 dataPointNo_0=0;
2566 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2567 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2568 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2569 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2570 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2571 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2572 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2573 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2574 // }
2575 }
2576
2577 }
2578 else {
2579 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2580 }
2581
2582 } else if (0 == rank0) {
2583 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2584 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2585 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2586 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2587 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2588 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2589 }
2590 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2591
2592 // Prepare the DataConstant input
2593 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2594
2595 // Borrow DataTagged input from Data object
2596 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2597
2598 // Prepare a DataTagged output (tmp_2)
2599 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2600 res.tag();
2601 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2602
2603 // Prepare offset into DataConstant
2604 int offset_0 = tmp_0->getPointOffset(0,0);
2605 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2606
2607 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2608 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2609
2610 // Compute a result for the default
2611 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2612 // Compute a result for each tag
2613 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2614 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2615 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2616 tmp_2->addTag(i->first);
2617 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2618 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2619 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2620 }
2621
2622 }
2623 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2624
2625 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2626 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2627 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2628 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2629
2630 int sampleNo_1;
2631 int numSamples_1 = arg_1_Z.getNumSamples();
2632 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2633 int offset_0 = tmp_0->getPointOffset(0,0);
2634 const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2635 double ptr_0 = ptr_src[0];
2636 int size = size1*numDataPointsPerSample_1;
2637 res.requireWrite();
2638 #pragma omp parallel for private(sampleNo_1) schedule(static)
2639 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2640 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2641 int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2642 int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2643 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2644 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2645 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2646 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2647
2648 // }
2649 }
2650
2651 }
2652 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2653
2654 // Borrow DataTagged input from Data object
2655 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2656
2657 // Prepare the DataConstant input
2658 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2659
2660 // Prepare a DataTagged output (tmp_2)
2661 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2662 res.tag();
2663 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2664
2665 // Prepare offset into DataConstant
2666 int offset_1 = tmp_1->getPointOffset(0,0);
2667 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2668
2669 // Get the pointers to the actual data
2670 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2671 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2672
2673
2674 // Compute a result for the default
2675 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2676 // Compute a result for each tag
2677 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2678 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2679 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2680 tmp_2->addTag(i->first);
2681 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2682 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2683
2684 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2685 }
2686
2687 }
2688 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2689
2690 // Borrow DataTagged input from Data object
2691 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2692
2693 // Borrow DataTagged input from Data object
2694 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2695
2696 // Prepare a DataTagged output (tmp_2)
2697 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2698 res.tag(); // DataTagged output
2699 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2700
2701 // Get the pointers to the actual data
2702 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2703 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2704 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2705
2706 // Compute a result for the default
2707 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2708 // Merge the tags
2709 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2710 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2711 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2712 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2713 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2714 }
2715 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2716 tmp_2->addTag(i->first);
2717 }
2718 // Compute a result for each tag
2719 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2720 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2721 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2722 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2723 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2724
2725 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2726 }
2727
2728 }
2729 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2730
2731 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2732 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2733 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2734 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2735 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2736
2737 int sampleNo_0,dataPointNo_0;
2738 int numSamples_0 = arg_0_Z.getNumSamples();
2739 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2740 res.requireWrite();
2741 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2742 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2743 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2744 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2745 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2746 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2747 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2748 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2749 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2750 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2751 }
2752 }
2753
2754 }
2755 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2756 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2757 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2758 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2759 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2760
2761 int sampleNo_0,dataPointNo_0;
2762 int numSamples_0 = arg_0_Z.getNumSamples();
2763 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2764 int offset_1 = tmp_1->getPointOffset(0,0);
2765 res.requireWrite();
2766 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2767 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2768 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2769 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2770 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2771 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2772 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2773 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2774 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2775 }
2776 }
2777
2778
2779 }
2780 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2781
2782 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2783 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2784 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2785 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2786 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2787
2788 int sampleNo_0,dataPointNo_0;
2789 int numSamples_0 = arg_0_Z.getNumSamples();
2790 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2791 res.requireWrite();
2792 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2793 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2794 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2795 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2796 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2797 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2798 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2799 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2800 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2801 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2802 }
2803 }
2804
2805 }
2806 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2807
2808 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2809 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2810 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2811 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2812 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2813
2814 int sampleNo_0,dataPointNo_0;
2815 int numSamples_0 = arg_0_Z.getNumSamples();
2816 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2817 res.requireWrite();
2818 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2819 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2820 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2821 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2822 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2823 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2824 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2825 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2826 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2827 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2828 }
2829 }
2830
2831 }
2832 else {
2833 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2834 }
2835
2836 } else if (0 == rank1) {
2837 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2838 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2839 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2840 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2841 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2842 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2843 }
2844 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2845
2846 // Prepare the DataConstant input
2847 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2848
2849 // Borrow DataTagged input from Data object
2850 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2851
2852 // Prepare a DataTagged output (tmp_2)
2853 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2854 res.tag();
2855 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2856
2857 // Prepare offset into DataConstant
2858 int offset_0 = tmp_0->getPointOffset(0,0);
2859 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2860
2861 // Get the pointers to the actual data
2862 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2863 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2864
2865 // Compute a result for the default
2866 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2867 // Compute a result for each tag
2868 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2869 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2870 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2871 tmp_2->addTag(i->first);
2872 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2873 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2874 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2875 }
2876 }
2877 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2878
2879 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2880 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2881 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2882 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2883
2884 int sampleNo_1,dataPointNo_1;
2885 int numSamples_1 = arg_1_Z.getNumSamples();
2886 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2887 int offset_0 = tmp_0->getPointOffset(0,0);
2888 res.requireWrite();
2889 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2890 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2891 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2892 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2893 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2894 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2895 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2896 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2897 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2898 }
2899 }
2900
2901 }
2902 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2903
2904 // Borrow DataTagged input from Data object
2905 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2906
2907 // Prepare the DataConstant input
2908 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2909
2910 // Prepare a DataTagged output (tmp_2)
2911 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2912 res.tag();
2913 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2914
2915 // Prepare offset into DataConstant
2916 int offset_1 = tmp_1->getPointOffset(0,0);
2917 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2918 // Get the pointers to the actual data
2919 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2920 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2921 // Compute a result for the default
2922 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2923 // Compute a result for each tag
2924 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2925 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2926 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2927 tmp_2->addTag(i->first);
2928 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2929 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2930 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2931 }
2932
2933 }
2934 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2935
2936 // Borrow DataTagged input from Data object
2937 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2938
2939 // Borrow DataTagged input from Data object
2940 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2941
2942 // Prepare a DataTagged output (tmp_2)
2943 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2944 res.tag(); // DataTagged output
2945 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2946
2947 // Get the pointers to the actual data
2948 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2949 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2950 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2951
2952 // Compute a result for the default
2953 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2954 // Merge the tags
2955 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2956 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2957 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2958 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2959 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2960 }
2961 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2962 tmp_2->addTag(i->first);
2963 }
2964 // Compute a result for each tag
2965 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2966 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2967 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2968 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2969 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2970 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2971 }
2972
2973 }
2974 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2975
2976 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2977 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2978 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2979 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2980 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2981
2982 int sampleNo_0,dataPointNo_0;
2983 int numSamples_0 = arg_0_Z.getNumSamples();
2984 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2985 res.requireWrite();
2986 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2987 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2988 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2989 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2990 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2991 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2992 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2993 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2994 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2995 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2996 }
2997 }
2998
2999 }
3000 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
3001 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3002 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3003 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3004 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3005
3006 int sampleNo_0;
3007 int numSamples_0 = arg_0_Z.getNumSamples();
3008 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3009 int offset_1 = tmp_1->getPointOffset(0,0);
3010 const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3011 double ptr_1 = ptr_src[0];
3012 int size = size0 * numDataPointsPerSample_0;
3013 res.requireWrite();
3014 #pragma omp parallel for private(sampleNo_0) schedule(static)
3015 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3016 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3017 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3018 int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3019 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3020 // const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3021 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3022 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3023 // }
3024 }
3025
3026
3027 }
3028 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3029
3030 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
3031 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3032 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3033 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3034 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3035
3036 int sampleNo_0,dataPointNo_0;
3037 int numSamples_0 = arg_0_Z.getNumSamples();
3038 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3039 res.requireWrite();
3040 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3041 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3042 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3043 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3044 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3045 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3046 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3047 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3048 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3049 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3050 }
3051 }
3052
3053 }
3054 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3055
3056 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
3057 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3058 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3059 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3060 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3061
3062 int sampleNo_0,dataPointNo_0;
3063 int numSamples_0 = arg_0_Z.getNumSamples();
3064 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3065 res.requireWrite();
3066 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3067 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3068 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3069 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3070 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3071 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3072 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3073 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3074 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3075 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3076 }
3077 }
3078
3079 }
3080 else {
3081 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3082 }
3083
3084 } else {
3085 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3086 }
3087
3088 return res;
3089 }
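// Editorial usage sketch (not part of the original header): C_TensorBinaryOperation is
// the free-function engine behind binary arithmetic on Data. A minimal sketch, assuming
// two shape-compatible Data objects a and b:
//
//   #include <functional>
//   Data sum  = C_TensorBinaryOperation(a, b, std::plus<double>());
//   Data prod = C_TensorBinaryOperation(a, b, std::multiplies<double>());
//
// Interpolation to a common FunctionSpace and the choice of DataConstant/DataTagged/
// DataExpanded storage for the result are handled inside the template, as shown above.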
3090
3091 template <typename UnaryFunction>
3092 Data
3093 C_TensorUnaryOperation(Data const &arg_0,
3094 UnaryFunction operation)
3095 {
3096 if (arg_0.isEmpty()) // do this before we attempt to interpolate
3097 {
3098 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
3099 }
3100 if (arg_0.isLazy())
3101 {
3102 throw DataException("Error - Operations not permitted on lazy data.");
3103 }
3104 // Interpolate if necessary and find an appropriate function space
3105 Data arg_0_Z = Data(arg_0);
3106
3107 // Get rank and shape of inputs
3108 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3109 int size0 = arg_0_Z.getDataPointSize();
3110
3111 // Declare output Data object
3112 Data res;
3113
3114 if (arg_0_Z.isConstant()) {
3115 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3116 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3117 double *ptr_2 = &(res.getDataAtOffsetRW(0));
3118 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3119 }
3120 else if (arg_0_Z.isTagged()) {
3121
3122 // Borrow DataTagged input from Data object
3123 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3124
3125 // Prepare a DataTagged output 2
3126 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3127 res.tag();
3128 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3129
3130 // Get the pointers to the actual data
3131 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3132 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3133 // Compute a result for the default
3134 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3135 // Compute a result for each tag
3136 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3137 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3138 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3139 tmp_2->addTag(i->first);
3140 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3141 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3142 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3143 }
3144
3145 }
3146 else if (arg_0_Z.isExpanded()) {
3147
3148 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3149 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3150 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3151
3152 int sampleNo_0,dataPointNo_0;
3153 int numSamples_0 = arg_0_Z.getNumSamples();
3154 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3155 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3156 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3157 dataPointNo_0=0;
3158 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3159 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3160 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3161 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3162 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3163 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3164 // }
3165 }
3166 }
3167 else {
3168 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3169 }
3170
3171 return res;
3172 }
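// Editorial usage sketch (not part of the original header): C_TensorUnaryOperation
// applies a double(double) callable to every component. A minimal sketch, assuming a
// Data object a:
//
//   #include <cmath>
//   Data s = C_TensorUnaryOperation(a, (double (*)(double))::sin);
//
// Any functor or function pointer callable as double(double) can be passed; the storage
// type of the result (constant, tagged or expanded) mirrors that of the input.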
3173
3174 }
3175 #endif
