
Contents of /trunk/escript/src/Data.h



Revision 3506
Wed May 11 01:59:45 2011 UTC by jfenwick
File MIME type: text/plain
File size: 95549 byte(s)
Use VSL if MKLRANDOM is defined.
Use different seeds for different ranks/threads.
Use different seeds for successive calls with no seed given.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2010 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30
31 extern "C" {
32 #include "DataC.h"
33 //#include <omp.h>
34 }
35
36 #ifdef _OPENMP
37 #include <omp.h>
38 #endif
39
40 #include "esysUtils/Esys_MPI.h"
41 #include <string>
42 #include <algorithm>
43 #include <sstream>
44
45 #include <boost/shared_ptr.hpp>
46 #include <boost/python/object.hpp>
47 #include <boost/python/tuple.hpp>
48
49 namespace escript {
50
51 //
52 // Forward declaration for various implementations of Data.
53 class DataConstant;
54 class DataTagged;
55 class DataExpanded;
56 class DataLazy;
57
58 /**
59 \brief
60 Data represents a collection of datapoints.
61
62 Description:
63 Internally, the datapoints are actually stored by a DataAbstract object.
64 The specific instance of DataAbstract used may vary over the lifetime
65 of the Data object.
66 Some methods on this class return references (e.g. getShape()).
67 These references should not be used after an operation which changes the underlying DataAbstract object.
68 Doing so will lead to invalid memory access.
69 This should not affect any methods exposed via boost::python.
70 */
71 class Data {
72
73 public:
74
75 // These typedefs allow function names to be cast to pointers
76 // to functions of the appropriate type when calling unaryOp etc.
77 typedef double (*UnaryDFunPtr)(double);
78 typedef double (*BinaryDFunPtr)(double,double);
79
80
81 /**
82 Constructors.
83 */
84
85 /**
86 \brief
87 Default constructor.
88 Creates a DataEmpty object.
89 */
90 ESCRIPT_DLL_API
91 Data();
92
93 /**
94 \brief
95 Copy constructor.
96 WARNING: Only performs a shallow copy.
97 */
98 ESCRIPT_DLL_API
99 Data(const Data& inData);
100
101 /**
102 \brief
103 Constructor from another Data object. If "what" differs from the
104 function space of inData, an attempt is made to interpolate inData onto "what";
105 otherwise a shallow copy of inData is returned.
106 */
107 ESCRIPT_DLL_API
108 Data(const Data& inData,
109 const FunctionSpace& what);
110
111 /**
112 \brief Copy Data from an existing vector
113 */
114
115 ESCRIPT_DLL_API
116 Data(const DataTypes::ValueType& value,
117 const DataTypes::ShapeType& shape,
118 const FunctionSpace& what=FunctionSpace(),
119 bool expanded=false);
120
121 /**
122 \brief
123 Constructor which creates a Data with points having the specified shape.
124
125 \param value - Input - Single value applied to all Data.
126 \param dataPointShape - Input - The shape of each data point.
127 \param what - Input - A description of what this data represents.
128 \param expanded - Input - Flag; if true, fill the entire container with
129 the given value. Otherwise a more efficient storage
130 mechanism will be used.
131 */
132 ESCRIPT_DLL_API
133 Data(double value,
134 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
135 const FunctionSpace& what=FunctionSpace(),
136 bool expanded=false);
137
138 /**
139 \brief
140 Constructor which performs a deep copy of a region from another Data object.
141
142 \param inData - Input - Input Data object.
143 \param region - Input - Region to copy.
144 */
145 ESCRIPT_DLL_API
146 Data(const Data& inData,
147 const DataTypes::RegionType& region);
148
149 /**
150 \brief
151 Constructor which copies data from any object that can be treated like a python array/sequence.
152
153 \param value - Input - Input data.
154 \param what - Input - A description of what this data represents.
155 \param expanded - Input - Flag; if true, fill the entire container with
156 the value. Otherwise a more efficient storage
157 mechanism will be used.
158 */
159 ESCRIPT_DLL_API
160 Data(const boost::python::object& value,
161 const FunctionSpace& what=FunctionSpace(),
162 bool expanded=false);
163
164 /**
165 \brief
166 Constructor which copies data from a wrapped array.
167
168 \param value - Input - Input data.
169 \param what - Input - A description of what this data represents.
170 \param expanded - Input - Flag; if true, fill the entire container with
171 the value. Otherwise a more efficient storage
172 mechanism will be used.
173 */
174 ESCRIPT_DLL_API
175 Data(const WrappedArray& w, const FunctionSpace& what,
176 bool expanded=false);
177
178
179 /**
180 \brief
181 Constructor which creates a DataConstant.
182 Copies data from any object that can be treated like a python array/sequence.
183 All other parameters are copied from other.
184
185 \param value - Input - Input data.
186 \param other - Input - contains all other parameters.
187 */
188 ESCRIPT_DLL_API
189 Data(const boost::python::object& value,
190 const Data& other);
191
192 /**
193 \brief
194 Constructor which creates a DataConstant of "shape" with constant value.
195 */
196 ESCRIPT_DLL_API
197 Data(double value,
198 const boost::python::tuple& shape=boost::python::make_tuple(),
199 const FunctionSpace& what=FunctionSpace(),
200 bool expanded=false);
201
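/*
   Editor's usage sketch (not part of the original header): a minimal illustration of the
   constructors declared above. The FunctionSpace argument fs is assumed to come from a
   domain created elsewhere, and the include path is an assumption for your build layout.

   #include "escript/Data.h"

   void constructor_examples(const escript::FunctionSpace& fs)
   {
       using namespace escript;
       // Scalar constant 1.5 at every data point (compact, non-expanded storage).
       Data a(1.5, DataTypes::ShapeType(), fs, false);
       // Rank-1 shape (3,) filled with 0.0, expanded so each data point is stored separately
       // (assuming DataTypes::ShapeType behaves like a std::vector of dimensions).
       Data b(0.0, DataTypes::ShapeType(1, 3), fs, true);
       Data c(a);        // shallow copy (see the copy constructor warning above)
       Data d(b, fs);    // same function space, so this is effectively a shallow copy too
   }
*/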
202 /**
203 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
204 Once you have passed the pointer, do not delete it.
205 */
206 ESCRIPT_DLL_API
207 explicit Data(DataAbstract* underlyingdata);
208
209 /**
210 \brief Create a Data based on the supplied DataAbstract
211 */
212 ESCRIPT_DLL_API
213 explicit Data(DataAbstract_ptr underlyingdata);
214
215 /**
216 \brief
217 Destructor
218 */
219 ESCRIPT_DLL_API
220 ~Data();
221
222 /**
223 \brief Make this object a deep copy of "other".
224 */
225 ESCRIPT_DLL_API
226 void
227 copy(const Data& other);
228
229 /**
230 \brief Return a pointer to a deep copy of this object.
231 */
232 ESCRIPT_DLL_API
233 Data
234 copySelf();
235
236
237 /**
238 \brief produce a delayed evaluation version of this Data.
239 */
240 ESCRIPT_DLL_API
241 Data
242 delay();
243
244 /**
245 \brief convert the current data into lazy data.
246 */
247 ESCRIPT_DLL_API
248 void
249 delaySelf();
250
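/*
   Editor's usage sketch (not part of the original header): how delay(), delaySelf() and
   resolve() are intended to fit together, assuming d is an existing non-empty Data object.

   void lazy_example(escript::Data& d)
   {
       escript::Data lazy = d.delay();  // obtain a delayed-evaluation version of d
       lazy.resolve();                  // force evaluation back to ready data when needed
       d.delaySelf();                   // or convert d itself to lazy in place...
       d.resolve();                     // ...and resolve it again later
   }
*/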
251
252 /**
253 Member access methods.
254 */
255
256 /**
257 \brief
258 switches on update protection
259
260 */
261 ESCRIPT_DLL_API
262 void
263 setProtection();
264
265 /**
266 \brief
267 Returns true if the data object is protected against updates
268
269 */
270 ESCRIPT_DLL_API
271 bool
272 isProtected() const;
273
274
275 /**
276 \brief
277 Return the value of a data point as a python tuple.
278 */
279 ESCRIPT_DLL_API
280 const boost::python::object
281 getValueOfDataPointAsTuple(int dataPointNo);
282
283 /**
284 \brief
285 sets the values of a data-point from a python object on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
290
291 /**
292 \brief
293 sets the values of a data-point from an array-like object on this process
294 */
295 ESCRIPT_DLL_API
296 void
297 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
298
299 /**
300 \brief
301 sets the values of a data-point on this process
302 */
303 ESCRIPT_DLL_API
304 void
305 setValueOfDataPoint(int dataPointNo, const double);
306
307 /**
308 \brief Return a data point across all processors as a python tuple.
309 */
310 ESCRIPT_DLL_API
311 const boost::python::object
312 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
313
314 /**
315 \brief
316 Return the tag number associated with the given data-point.
317
318 */
319 ESCRIPT_DLL_API
320 int
321 getTagNumber(int dpno);
322
323 /**
324 \brief
325 Return the C wrapper for the Data object.
326 */
327 ESCRIPT_DLL_API
328 escriptDataC
329 getDataC();
330
331
332
333 /**
334 \brief
335 Return the C wrapper for the Data object - const version.
336 */
337 ESCRIPT_DLL_API
338 escriptDataC
339 getDataC() const;
340
341
342 /**
343 \brief
344 Return the data as a string. For large amounts of data, a summary is returned instead.
345 */
346 ESCRIPT_DLL_API
347 std::string
348 toString() const;
349
350 /**
351 \brief
352 Whatever the current Data type, convert this object into a DataExpanded.
353 */
354 ESCRIPT_DLL_API
355 void
356 expand();
357
358 /**
359 \brief
360 If possible convert this Data to DataTagged. This will only allow
361 Constant data to be converted to tagged. An attempt to convert
362 Expanded data to tagged will throw an exception.
363 */
364 ESCRIPT_DLL_API
365 void
366 tag();
367
368 /**
369 \brief If this data is lazy, then convert it to ready data.
370 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
371 */
372 ESCRIPT_DLL_API
373 void
374 resolve();
375
376
377 /**
378 \brief Ensures data is ready for write access.
379 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
380 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
381 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
382 Doing so might introduce additional sharing.
383 */
384 ESCRIPT_DLL_API
385 void
386 requireWrite();
387
388 /**
389 \brief
390 Return true if this Data is expanded.
391 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
392 */
393 ESCRIPT_DLL_API
394 bool
395 isExpanded() const;
396
397 /**
398 \brief
399 Return true if this Data is expanded or resolves to expanded.
400 That is, if it has a separate value for each datapoint in the sample.
401 */
402 ESCRIPT_DLL_API
403 bool
404 actsExpanded() const;
405
406
407 /**
408 \brief
409 Return true if this Data is tagged.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isTagged() const;
414
415 /**
416 \brief
417 Return true if this Data is constant.
418 */
419 ESCRIPT_DLL_API
420 bool
421 isConstant() const;
422
423 /**
424 \brief Return true if this Data is lazy.
425 */
426 ESCRIPT_DLL_API
427 bool
428 isLazy() const;
429
430 /**
431 \brief Return true if this data is ready.
432 */
433 ESCRIPT_DLL_API
434 bool
435 isReady() const;
436
437 /**
438 \brief
439 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
440 contains datapoints.
441 */
442 ESCRIPT_DLL_API
443 bool
444 isEmpty() const;
445
446 /**
447 \brief
448 Return the function space.
449 */
450 ESCRIPT_DLL_API
451 inline
452 const FunctionSpace&
453 getFunctionSpace() const
454 {
455 return m_data->getFunctionSpace();
456 }
457
458 /**
459 \brief
460 Return a copy of the function space.
461 */
462 ESCRIPT_DLL_API
463 const FunctionSpace
464 getCopyOfFunctionSpace() const;
465
466 /**
467 \brief
468 Return the domain.
469 */
470 ESCRIPT_DLL_API
471 inline
472 // const AbstractDomain&
473 const_Domain_ptr
474 getDomain() const
475 {
476 return getFunctionSpace().getDomain();
477 }
478
479
480 /**
481 \brief
482 Return the domain.
483 TODO: For internal use only. This should be removed.
484 */
485 ESCRIPT_DLL_API
486 inline
487 // const AbstractDomain&
488 Domain_ptr
489 getDomainPython() const
490 {
491 return getFunctionSpace().getDomainPython();
492 }
493
494 /**
495 \brief
496 Return a copy of the domain.
497 */
498 ESCRIPT_DLL_API
499 const AbstractDomain
500 getCopyOfDomain() const;
501
502 /**
503 \brief
504 Return the rank of the point data.
505 */
506 ESCRIPT_DLL_API
507 inline
508 unsigned int
509 getDataPointRank() const
510 {
511 return m_data->getRank();
512 }
513
514 /**
515 \brief
516 Return the number of data points
517 */
518 ESCRIPT_DLL_API
519 inline
520 int
521 getNumDataPoints() const
522 {
523 return getNumSamples() * getNumDataPointsPerSample();
524 }
525 /**
526 \brief
527 Return the number of samples.
528 */
529 ESCRIPT_DLL_API
530 inline
531 int
532 getNumSamples() const
533 {
534 return m_data->getNumSamples();
535 }
536
537 /**
538 \brief
539 Return the number of data points per sample.
540 */
541 ESCRIPT_DLL_API
542 inline
543 int
544 getNumDataPointsPerSample() const
545 {
546 return m_data->getNumDPPSample();
547 }
548
549
550 /**
551 \brief
552 Return the number of values in the shape for this object.
553 */
554 ESCRIPT_DLL_API
555 int
556 getNoValues() const
557 {
558 return m_data->getNoValues();
559 }
560
561
562 /**
563 \brief
564 dumps the object into a netCDF file
565 */
566 ESCRIPT_DLL_API
567 void
568 dump(const std::string fileName) const;
569
570 /**
571 \brief returns the values of the object as a list of tuples (one for each datapoint).
572
573 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
574 If false, the result is a list of scalars [1, 2, ...]
575 */
576 ESCRIPT_DLL_API
577 const boost::python::object
578 toListOfTuples(bool scalarastuple=true);
579
580
581 /**
582 \brief
583 Return the sample data for the given sample no.
584 Please do not use this unless you NEED to access samples individually
585 \param sampleNo - Input - the given sample no.
586 \return pointer to the sample data.
587 */
588 ESCRIPT_DLL_API
589 inline
590 const DataAbstract::ValueType::value_type*
591 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo);
592
593
594 /**
595 \brief
596 Return the sample data for the given sample no.
597 Please do not use this unless you NEED to access samples individually
598 \param sampleNo - Input - the given sample no.
599 \return pointer to the sample data.
600 */
601 ESCRIPT_DLL_API
602 inline
603 DataAbstract::ValueType::value_type*
604 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
605
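/*
   Editor's usage sketch (not part of the original header): the requireWrite() +
   getSampleDataRW() pattern described above, scaling every value in place. It assumes each
   sample is a contiguous block of getNumDataPointsPerSample()*getDataPointSize() doubles.

   void scale_in_place(escript::Data& d, double factor)
   {
       d.requireWrite();    // single-threaded section: resolve/unshare before writing
       const int numSamples = d.getNumSamples();
       const int valsPerSample = d.getNumDataPointsPerSample() * d.getDataPointSize();
       #pragma omp parallel for
       for (int s = 0; s < numSamples; ++s)
       {
           double* p = d.getSampleDataRW(s);  // per-sample pointer, safe after requireWrite()
           for (int i = 0; i < valsPerSample; ++i)
           {
               p[i] *= factor;
           }
       }
   }
*/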
606
607 /**
608 \brief
609 Return the sample data for the given tag. If an attempt is made to
610 access data that isn't tagged an exception will be thrown.
611 \param tag - Input - the tag key.
612 */
613 ESCRIPT_DLL_API
614 inline
615 DataAbstract::ValueType::value_type*
616 getSampleDataByTag(int tag)
617 {
618 return m_data->getSampleDataByTag(tag);
619 }
620
621 /**
622 \brief
623 Return a reference into the DataVector which points to the specified data point.
624 \param sampleNo - Input -
625 \param dataPointNo - Input -
626 */
627 ESCRIPT_DLL_API
628 DataTypes::ValueType::const_reference
629 getDataPointRO(int sampleNo, int dataPointNo);
630
631 /**
632 \brief
633 Return a reference into the DataVector which points to the specified data point.
634 \param sampleNo - Input -
635 \param dataPointNo - Input -
636 */
637 ESCRIPT_DLL_API
638 DataTypes::ValueType::reference
639 getDataPointRW(int sampleNo, int dataPointNo);
640
641
642
643 /**
644 \brief
645 Return the offset for the given sample and point within the sample
646 */
647 ESCRIPT_DLL_API
648 inline
649 DataTypes::ValueType::size_type
650 getDataOffset(int sampleNo,
651 int dataPointNo)
652 {
653 return m_data->getPointOffset(sampleNo,dataPointNo);
654 }
655
656 /**
657 \brief
658 Return a reference to the data point shape.
659 */
660 ESCRIPT_DLL_API
661 inline
662 const DataTypes::ShapeType&
663 getDataPointShape() const
664 {
665 return m_data->getShape();
666 }
667
668 /**
669 \brief
670 Return the data point shape as a tuple of integers.
671 */
672 ESCRIPT_DLL_API
673 const boost::python::tuple
674 getShapeTuple() const;
675
676 /**
677 \brief
678 Return the size of the data point. It is the product of the
679 data point shape dimensions.
680 */
681 ESCRIPT_DLL_API
682 int
683 getDataPointSize() const;
684
685 /**
686 \brief
687 Return the number of doubles stored for this Data.
688 */
689 ESCRIPT_DLL_API
690 DataTypes::ValueType::size_type
691 getLength() const;
692
693 /**
694 \brief Return true if this object contains no samples.
695 This is not the same as isEmpty()
696 */
697 ESCRIPT_DLL_API
698 bool
699 hasNoSamples() const
700 {
701 return getLength()==0;
702 }
703
704 /**
705 \brief
706 Assign the given value to the tag associated with name. Implicitly converts this
707 object to type DataTagged. Throws an exception if this object
708 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
709 \param name - Input - name of tag.
710 \param value - Input - Value to associate with given key.
711 */
712 ESCRIPT_DLL_API
713 void
714 setTaggedValueByName(std::string name,
715 const boost::python::object& value);
716
717 /**
718 \brief
719 Assign the given value to the tag. Implicitly converts this
720 object to type DataTagged if it is constant.
721
722 \param tagKey - Input - Integer key.
723 \param value - Input - Value to associate with given key.
724 *
725 */
726 ESCRIPT_DLL_API
727 void
728 setTaggedValue(int tagKey,
729 const boost::python::object& value);
730
731 /**
732 \brief
733 Assign the given value to the tag. Implicitly converts this
734 object to type DataTagged if it is constant.
735
736 \param tagKey - Input - Integer key.
737 \param pointshape - Input - The shape of the value parameter
738 \param value - Input - Value to associate with given key.
739 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
740 */
741 ESCRIPT_DLL_API
742 void
743 setTaggedValueFromCPP(int tagKey,
744 const DataTypes::ShapeType& pointshape,
745 const DataTypes::ValueType& value,
746 int dataOffset=0);
747
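/*
   Editor's usage sketch (not part of the original header): assigning a tagged value from
   C++. It assumes DataTypes::ValueType behaves like a std::vector of doubles and that d
   currently holds constant (or already tagged) data.

   void tag_example(escript::Data& d)
   {
       d.tag();                                     // convert constant data to tagged
       escript::DataTypes::ShapeType shape;         // scalar shape
       escript::DataTypes::ValueType value(1);
       value[0] = 42.0;
       d.setTaggedValueFromCPP(10, shape, value);   // tag key 10 now maps to the value 42.0
   }
*/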
748
749
750 /**
751 \brief
752 Copy other Data object into this Data object where mask is positive.
753 */
754 ESCRIPT_DLL_API
755 void
756 copyWithMask(const Data& other,
757 const Data& mask);
758
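/*
   Editor's usage sketch (not part of the original header): combining copyWithMask() with
   one of the where*() predicates declared below to clamp negative values of d to zero.

   void clamp_negative_to_zero(escript::Data& d)
   {
       escript::Data zero(0.0, d.getDataPointShape(), d.getFunctionSpace(), false);
       escript::Data mask = d.whereNegative();  // 1 where d < 0, otherwise 0
       d.copyWithMask(zero, mask);              // overwrite d wherever the mask is positive
   }
*/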
759 /**
760 Data object operation methods and operators.
761 */
762
763 /**
764 \brief
765 set all values to zero
766 *
767 */
768 ESCRIPT_DLL_API
769 void
770 setToZero();
771
772 /**
773 \brief
774 Interpolates this onto the given functionspace and returns
775 the result as a Data object.
776 *
777 */
778 ESCRIPT_DLL_API
779 Data
780 interpolate(const FunctionSpace& functionspace) const;
781
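/*
   Editor's usage sketch (not part of the original header): guarded interpolation onto a
   target FunctionSpace, using probeInterpolation() (declared further below) to check first.

   escript::Data safe_interpolate(const escript::Data& d, const escript::FunctionSpace& target)
   {
       if (!d.probeInterpolation(target))
       {
           throw escript::DataException("Data cannot be interpolated to the requested FunctionSpace.");
       }
       return d.interpolate(target);
   }
*/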
782 ESCRIPT_DLL_API
783 Data
784 interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
785 double undef, Data& B, double Bmin, double Bstep, Data& C,
786 double Cmin, double Cstep, bool check_boundaries);
787
788 ESCRIPT_DLL_API
789 Data
790 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
791 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
792
793 ESCRIPT_DLL_API
794 Data
795 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
796 double undef,bool check_boundaries);
797
798
799 ESCRIPT_DLL_API
800 Data
801 interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
802 Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
803
804
805 ESCRIPT_DLL_API
806 Data
807 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
808 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
809
810 ESCRIPT_DLL_API
811 Data
812 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
813 double undef,bool check_boundaries);
814
815 /**
816 \brief
817 Calculates the gradient of the data at the data points of functionspace.
818 If functionspace is not present the function space of Function(getDomain()) is used.
819 *
820 */
821 ESCRIPT_DLL_API
822 Data
823 gradOn(const FunctionSpace& functionspace) const;
824
825 ESCRIPT_DLL_API
826 Data
827 grad() const;
828
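/*
   Editor's usage sketch (not part of the original header): computing a gradient. grad()
   uses the function space of Function(getDomain()) as described above; Function() is the
   usual escript helper and is assumed to be available to callers, not declared here.

   escript::Data gradient_example(const escript::Data& u)
   {
       return u.grad();   // or u.gradOn(fs) for an explicitly chosen FunctionSpace fs
   }
*/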
829 /**
830 \brief
831 Calculate the integral over the function space domain as a python tuple.
832 */
833 ESCRIPT_DLL_API
834 boost::python::object
835 integrateToTuple_const() const;
836
837
838 /**
839 \brief
840 Calculate the integral over the function space domain as a python tuple.
841 */
842 ESCRIPT_DLL_API
843 boost::python::object
844 integrateToTuple();
845
846
847
848 /**
849 \brief
850 Return the reciprocal (1./value) of each data point of this Data object.
851 *
852 */
853 ESCRIPT_DLL_API
854 Data
855 oneOver() const;
856 /**
857 \brief
858 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
859 *
860 */
861 ESCRIPT_DLL_API
862 Data
863 wherePositive() const;
864
865 /**
866 \brief
867 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
868 *
869 */
870 ESCRIPT_DLL_API
871 Data
872 whereNegative() const;
873
874 /**
875 \brief
876 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
877 *
878 */
879 ESCRIPT_DLL_API
880 Data
881 whereNonNegative() const;
882
883 /**
884 \brief
885 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
886 *
887 */
888 ESCRIPT_DLL_API
889 Data
890 whereNonPositive() const;
891
892 /**
893 \brief
894 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
895 *
896 */
897 ESCRIPT_DLL_API
898 Data
899 whereZero(double tol=0.0) const;
900
901 /**
902 \brief
903 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
904 *
905 */
906 ESCRIPT_DLL_API
907 Data
908 whereNonZero(double tol=0.0) const;
909
910 /**
911 \brief
912 Return the maximum absolute value of this Data object.
913
914 The method is not const because lazy data needs to be expanded before Lsup can be computed.
915 The _const form can be used when the Data object is const; however, this will only work for
916 Data which is not Lazy.
917
918 For Data which contain no samples (or tagged Data for which no tags in use have a value)
919 zero is returned.
920 */
921 ESCRIPT_DLL_API
922 double
923 Lsup();
924
925 ESCRIPT_DLL_API
926 double
927 Lsup_const() const;
928
929
930 /**
931 \brief
932 Return the maximum value of this Data object.
933
934 The method is not const because lazy data needs to be expanded before sup can be computed.
935 The _const form can be used when the Data object is const; however, this will only work for
936 Data which is not Lazy.
937
938 For Data which contain no samples (or tagged Data for which no tags in use have a value)
939 a large negative value is returned.
940 */
941 ESCRIPT_DLL_API
942 double
943 sup();
944
945 ESCRIPT_DLL_API
946 double
947 sup_const() const;
948
949
950 /**
951 \brief
952 Return the minimum value of this Data object.
953
954 The method is not const because lazy data needs to be expanded before inf can be computed.
955 The _const form can be used when the Data object is const; however, this will only work for
956 Data which is not Lazy.
957
958 For Data which contain no samples (or tagged Data for which no tags in use have a value)
959 a large positive value is returned.
960 */
961 ESCRIPT_DLL_API
962 double
963 inf();
964
965 ESCRIPT_DLL_API
966 double
967 inf_const() const;
968
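/*
   Editor's usage sketch (not part of the original header): the global reductions above.
   The non-const forms resolve lazy data first; the _const forms are only safe on non-lazy
   Data, as the comments above explain.

   #include <iostream>

   void report_bounds(escript::Data& d)
   {
       std::cout << "Lsup=" << d.Lsup()
                 << " sup=" << d.sup()
                 << " inf=" << d.inf() << std::endl;
   }
*/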
969
970
971 /**
972 \brief
973 Return the absolute value of each data point of this Data object.
974 *
975 */
976 ESCRIPT_DLL_API
977 Data
978 abs() const;
979
980 /**
981 \brief
982 Return the maximum value of each data point of this Data object.
983 *
984 */
985 ESCRIPT_DLL_API
986 Data
987 maxval() const;
988
989 /**
990 \brief
991 Return the minimum value of each data point of this Data object.
992 *
993 */
994 ESCRIPT_DLL_API
995 Data
996 minval() const;
997
998 /**
999 \brief
1000 Return the (sample number, data-point number) of the data point with
1001 the minimum component value in this Data object.
1002 \note If you are working in python, please consider using Locator
1003 instead of manually manipulating process and point IDs.
1004 */
1005 ESCRIPT_DLL_API
1006 const boost::python::tuple
1007 minGlobalDataPoint() const;
1008
1009 /**
1010 \brief
1011 Return the (sample number, data-point number) of the data point with
1012 the maximum component value in this Data object.
1013 \note If you are working in python, please consider using Locator
1014 instead of manually manipulating process and point IDs.
1015 */
1016 ESCRIPT_DLL_API
1017 const boost::python::tuple
1018 maxGlobalDataPoint() const;
1019
1020
1021
1022 /**
1023 \brief
1024 Return the sign of each data point of this Data object.
1025 -1 for negative values, zero for zero values, 1 for positive values.
1026 *
1027 */
1028 ESCRIPT_DLL_API
1029 Data
1030 sign() const;
1031
1032 /**
1033 \brief
1034 Return the symmetric part of a matrix, i.e. half of the sum of the matrix and its transpose.
1035 *
1036 */
1037 ESCRIPT_DLL_API
1038 Data
1039 symmetric() const;
1040
1041 /**
1042 \brief
1043 Return the nonsymmetric (antisymmetric) part of a matrix, i.e. half of the difference between the matrix and its transpose.
1044 *
1045 */
1046 ESCRIPT_DLL_API
1047 Data
1048 nonsymmetric() const;
1049
1050 /**
1051 \brief
1052 Return the trace of a matrix
1053 *
1054 */
1055 ESCRIPT_DLL_API
1056 Data
1057 trace(int axis_offset) const;
1058
1059 /**
1060 \brief
1061 Transpose each data point of this Data object around the given axis.
1062 *
1063 */
1064 ESCRIPT_DLL_API
1065 Data
1066 transpose(int axis_offset) const;
1067
1068 /**
1069 \brief
1070 Return the eigenvalues of the symmetric part at each data point of this Data object, in increasing order.
1071 Currently this function is restricted to rank 2, square shape, and dimension 3.
1072 *
1073 */
1074 ESCRIPT_DLL_API
1075 Data
1076 eigenvalues() const;
1077
1078 /**
1079 \brief
1080 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1081 The eigenvalues are ordered in increasing size, where eigenvalues with a relative difference of less than
1082 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1083 first non-zero entry is positive.
1084 Currently this function is restricted to rank 2, square shape, and dimension 3.
1085 *
1086 */
1087 ESCRIPT_DLL_API
1088 const boost::python::tuple
1089 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1090
1091 /**
1092 \brief
1093 swaps the components axis0 and axis1
1094 *
1095 */
1096 ESCRIPT_DLL_API
1097 Data
1098 swapaxes(const int axis0, const int axis1) const;
1099
1100 /**
1101 \brief
1102 Return the error function erf of each data point of this Data object.
1103 *
1104 */
1105 ESCRIPT_DLL_API
1106 Data
1107 erf() const;
1108
1109 /**
1110 \brief
1111 Return the sin of each data point of this Data object.
1112 *
1113 */
1114 ESCRIPT_DLL_API
1115 Data
1116 sin() const;
1117
1118 /**
1119 \brief
1120 Return the cos of each data point of this Data object.
1121 *
1122 */
1123 ESCRIPT_DLL_API
1124 Data
1125 cos() const;
1126
1127 /**
1128 \brief
1129 Return the tan of each data point of this Data object.
1130 *
1131 */
1132 ESCRIPT_DLL_API
1133 Data
1134 tan() const;
1135
1136 /**
1137 \brief
1138 Return the asin of each data point of this Data object.
1139 *
1140 */
1141 ESCRIPT_DLL_API
1142 Data
1143 asin() const;
1144
1145 /**
1146 \brief
1147 Return the acos of each data point of this Data object.
1148 *
1149 */
1150 ESCRIPT_DLL_API
1151 Data
1152 acos() const;
1153
1154 /**
1155 \brief
1156 Return the atan of each data point of this Data object.
1157 *
1158 */
1159 ESCRIPT_DLL_API
1160 Data
1161 atan() const;
1162
1163 /**
1164 \brief
1165 Return the sinh of each data point of this Data object.
1166 *
1167 */
1168 ESCRIPT_DLL_API
1169 Data
1170 sinh() const;
1171
1172 /**
1173 \brief
1174 Return the cosh of each data point of this Data object.
1175 *
1176 */
1177 ESCRIPT_DLL_API
1178 Data
1179 cosh() const;
1180
1181 /**
1182 \brief
1183 Return the tanh of each data point of this Data object.
1184 *
1185 */
1186 ESCRIPT_DLL_API
1187 Data
1188 tanh() const;
1189
1190 /**
1191 \brief
1192 Return the asinh of each data point of this Data object.
1193 *
1194 */
1195 ESCRIPT_DLL_API
1196 Data
1197 asinh() const;
1198
1199 /**
1200 \brief
1201 Return the acosh of each data point of this Data object.
1202 *
1203 */
1204 ESCRIPT_DLL_API
1205 Data
1206 acosh() const;
1207
1208 /**
1209 \brief
1210 Return the atanh of each data point of this Data object.
1211 *
1212 */
1213 ESCRIPT_DLL_API
1214 Data
1215 atanh() const;
1216
1217 /**
1218 \brief
1219 Return the log to base 10 of each data point of this Data object.
1220 *
1221 */
1222 ESCRIPT_DLL_API
1223 Data
1224 log10() const;
1225
1226 /**
1227 \brief
1228 Return the natural log of each data point of this Data object.
1229 *
1230 */
1231 ESCRIPT_DLL_API
1232 Data
1233 log() const;
1234
1235 /**
1236 \brief
1237 Return the exponential function of each data point of this Data object.
1238 *
1239 */
1240 ESCRIPT_DLL_API
1241 Data
1242 exp() const;
1243
1244 /**
1245 \brief
1246 Return the square root of each data point of this Data object.
1247 *
1248 */
1249 ESCRIPT_DLL_API
1250 Data
1251 sqrt() const;
1252
1253 /**
1254 \brief
1255 Return the negation of each data point of this Data object.
1256 *
1257 */
1258 ESCRIPT_DLL_API
1259 Data
1260 neg() const;
1261
1262 /**
1263 \brief
1264 Return the identity of each data point of this Data object.
1265 Simply returns this object unmodified.
1266 *
1267 */
1268 ESCRIPT_DLL_API
1269 Data
1270 pos() const;
1271
1272 /**
1273 \brief
1274 Return the given power of each data point of this Data object.
1275
1276 \param right Input - the power to raise the object to.
1277 *
1278 */
1279 ESCRIPT_DLL_API
1280 Data
1281 powD(const Data& right) const;
1282
1283 /**
1284 \brief
1285 Return each data point of this Data object raised to the power given by the python object right.
1286
1287 \param right Input - the power to raise the object to.
1288 *
1289 */
1290 ESCRIPT_DLL_API
1291 Data
1292 powO(const boost::python::object& right) const;
1293
1294 /**
1295 \brief
1296 Return the python object left raised to the power of each data point of this Data object.
1297
1298 \param left Input - the base(s).
1299 *
1300 */
1301
1302 ESCRIPT_DLL_API
1303 Data
1304 rpowO(const boost::python::object& left) const;
1305
1306 /**
1307 \brief
1308 writes the object to a file in the DX file format
1309 */
1310 ESCRIPT_DLL_API
1311 void
1312 saveDX(std::string fileName) const;
1313
1314 /**
1315 \brief
1316 writes the object to a file in the VTK file format
1317 */
1318 ESCRIPT_DLL_API
1319 void
1320 saveVTK(std::string fileName) const;
1321
1322
1323
1324 /**
1325 \brief
1326 Overloaded operator +=
1327 \param right - Input - The right hand side.
1328 *
1329 */
1330 ESCRIPT_DLL_API
1331 Data& operator+=(const Data& right);
1332 ESCRIPT_DLL_API
1333 Data& operator+=(const boost::python::object& right);
1334
1335 ESCRIPT_DLL_API
1336 Data& operator=(const Data& other);
1337
1338 /**
1339 \brief
1340 Overloaded operator -=
1341 \param right - Input - The right hand side.
1342 *
1343 */
1344 ESCRIPT_DLL_API
1345 Data& operator-=(const Data& right);
1346 ESCRIPT_DLL_API
1347 Data& operator-=(const boost::python::object& right);
1348
1349 /**
1350 \brief
1351 Overloaded operator *=
1352 \param right - Input - The right hand side.
1353 *
1354 */
1355 ESCRIPT_DLL_API
1356 Data& operator*=(const Data& right);
1357 ESCRIPT_DLL_API
1358 Data& operator*=(const boost::python::object& right);
1359
1360 /**
1361 \brief
1362 Overloaded operator /=
1363 \param right - Input - The right hand side.
1364 *
1365 */
1366 ESCRIPT_DLL_API
1367 Data& operator/=(const Data& right);
1368 ESCRIPT_DLL_API
1369 Data& operator/=(const boost::python::object& right);
1370
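/*
   Editor's usage sketch (not part of the original header): the in-place and free arithmetic
   operators. The free operator+ used here is declared near the end of this header.

   escript::Data sum_example(const escript::Data& a, const escript::Data& b)
   {
       escript::Data s(a);   // shallow copy; += unshares/resolves as required
       s += b;               // in-place addition; incompatible function spaces are interpolated or rejected
       return s + b;         // free operator+(const Data&, const Data&)
   }
*/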
1371 /**
1372 \brief
1373 Newer style division operator for python
1374 */
1375 ESCRIPT_DLL_API
1376 Data truedivD(const Data& right);
1377
1378 /**
1379 \brief
1380 Newer style division operator for python
1381 */
1382 ESCRIPT_DLL_API
1383 Data truedivO(const boost::python::object& right);
1384
1385
1386 /**
1387 \brief
1388 wrapper for python add operation
1389 */
1390 ESCRIPT_DLL_API
1391 boost::python::object __add__(const boost::python::object& right);
1392
1393
1394 /**
1395 \brief
1396 wrapper for python subtract operation
1397 */
1398 ESCRIPT_DLL_API
1399 boost::python::object __sub__(const boost::python::object& right);
1400
1401 /**
1402 \brief
1403 wrapper for python reverse subtract operation
1404 */
1405 ESCRIPT_DLL_API
1406 boost::python::object __rsub__(const boost::python::object& right);
1407
1408 /**
1409 \brief
1410 wrapper for python multiply operation
1411 */
1412 ESCRIPT_DLL_API
1413 boost::python::object __mul__(const boost::python::object& right);
1414
1415 /**
1416 \brief
1417 wrapper for python divide operation
1418 */
1419 ESCRIPT_DLL_API
1420 boost::python::object __div__(const boost::python::object& right);
1421
1422 /**
1423 \brief
1424 wrapper for python reverse divide operation
1425 */
1426 ESCRIPT_DLL_API
1427 boost::python::object __rdiv__(const boost::python::object& right);
1428
1429 /**
1430 \brief return the inverse of matrices.
1431 */
1432 ESCRIPT_DLL_API
1433 Data
1434 matrixInverse() const;
1435
1436 /**
1437 \brief
1438 Returns true if this can be interpolated to functionspace.
1439 */
1440 ESCRIPT_DLL_API
1441 bool
1442 probeInterpolation(const FunctionSpace& functionspace) const;
1443
1444 /**
1445 Data object slicing methods.
1446 */
1447
1448 /**
1449 \brief
1450 Returns a slice from this Data object.
1451
1452 Description:
1453 Implements the [] get operator in python.
1454 Calls getSlice.
1455
1456 \param key - Input - python slice tuple specifying
1457 slice to return.
1458 */
1459 ESCRIPT_DLL_API
1460 Data
1461 getItem(const boost::python::object& key) const;
1462
1463 /**
1464 \brief
1465 Copies slice from value into this Data object.
1466
1467 Implements the [] set operator in python.
1468 Calls setSlice.
1469
1470 \param key - Input - python slice tuple specifying
1471 slice to copy from value.
1472 \param value - Input - Data object to copy from.
1473 */
1474 ESCRIPT_DLL_API
1475 void
1476 setItemD(const boost::python::object& key,
1477 const Data& value);
1478
1479 ESCRIPT_DLL_API
1480 void
1481 setItemO(const boost::python::object& key,
1482 const boost::python::object& value);
1483
1484 // The following public methods should be treated as private.
1485
1486 /**
1487 \brief
1488 Perform the given unary operation on every element of every data point in
1489 this Data object.
1490 */
1491 template <class UnaryFunction>
1492 ESCRIPT_DLL_API
1493 inline
1494 void
1495 unaryOp2(UnaryFunction operation);
1496
1497 /**
1498 \brief
1499 Return a Data object containing the specified slice of
1500 this Data object.
1501 \param region - Input - Region to copy.
1502 *
1503 */
1504 ESCRIPT_DLL_API
1505 Data
1506 getSlice(const DataTypes::RegionType& region) const;
1507
1508 /**
1509 \brief
1510 Copy the specified slice from the given value into this
1511 Data object.
1512 \param value - Input - Data to copy from.
1513 \param region - Input - Region to copy.
1514 *
1515 */
1516 ESCRIPT_DLL_API
1517 void
1518 setSlice(const Data& value,
1519 const DataTypes::RegionType& region);
1520
1521 /**
1522 \brief
1523 print the data values to stdout. Used for debugging
1524 */
1525 ESCRIPT_DLL_API
1526 void
1527 print(void);
1528
1529 /**
1530 \brief
1531 return the MPI rank number of the local data
1532 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1533 is returned
1534 */
1535 ESCRIPT_DLL_API
1536 int
1537 get_MPIRank(void) const;
1538
1539 /**
1540 \brief
1541 return the number of MPI processes for the local data
1542 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1543 is returned
1544 */
1545 ESCRIPT_DLL_API
1546 int
1547 get_MPISize(void) const;
1548
1549 /**
1550 \brief
1551 return the MPI communicator for the local data
1552 MPI_COMM_WORLD is assumed and returned.
1553 */
1554 ESCRIPT_DLL_API
1555 MPI_Comm
1556 get_MPIComm(void) const;
1557
1558 /**
1559 \brief
1560 return the object produced by the factory, which is a DataConstant or DataExpanded
1561 TODO Ownership of this object should be explained in doco.
1562 */
1563 ESCRIPT_DLL_API
1564 DataAbstract*
1565 borrowData(void) const;
1566
1567 ESCRIPT_DLL_API
1568 DataAbstract_ptr
1569 borrowDataPtr(void) const;
1570
1571 ESCRIPT_DLL_API
1572 DataReady_ptr
1573 borrowReadyPtr(void) const;
1574
1575
1576
1577 /**
1578 \brief
1579 Return a pointer to the beginning of the datapoint at the specified offset.
1580 TODO Eventually these should be inlined.
1581 \param i - position (offset) in the underlying data structure
1582 */
1583
1584 ESCRIPT_DLL_API
1585 DataTypes::ValueType::const_reference
1586 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1587
1588
1589 ESCRIPT_DLL_API
1590 DataTypes::ValueType::reference
1591 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1592
1593
1594 protected:
1595
1596 private:
1597
1598 template <class BinaryOp>
1599 double
1600 #ifdef ESYS_MPI
1601 lazyAlgWorker(double init, MPI_Op mpiop_type);
1602 #else
1603 lazyAlgWorker(double init);
1604 #endif
1605
1606 double
1607 LsupWorker() const;
1608
1609 double
1610 supWorker() const;
1611
1612 double
1613 infWorker() const;
1614
1615 boost::python::object
1616 integrateWorker() const;
1617
1618 void
1619 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1620
1621 void
1622 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1623
1624 // For internal use in Data.cpp only!
1625 // other uses should call the main entry points and allow laziness
1626 Data
1627 minval_nonlazy() const;
1628
1629 // For internal use in Data.cpp only!
1630 Data
1631 maxval_nonlazy() const;
1632
1633
1634 /**
1635 \brief
1636 Check *this and the right operand are compatible. Throws
1637 an exception if they aren't.
1638 \param right - Input - The right hand side.
1639 */
1640 inline
1641 void
1642 operandCheck(const Data& right) const
1643 {
1644 return m_data->operandCheck(*(right.m_data.get()));
1645 }
1646
1647 /**
1648 \brief
1649 Perform the specified reduction algorithm on every element of every data point in
1650 this Data object according to the given function and return the single value result.
1651 */
1652 template <class BinaryFunction>
1653 inline
1654 double
1655 algorithm(BinaryFunction operation,
1656 double initial_value) const;
1657
1658 /**
1659 \brief
1660 Reduce each data-point in this Data object using the given operation. Return a Data
1661 object with the same number of data-points, but with each data-point containing only
1662 one value - the result of the reduction operation on the corresponding data-point in
1663 this Data object
1664 */
1665 template <class BinaryFunction>
1666 inline
1667 Data
1668 dp_algorithm(BinaryFunction operation,
1669 double initial_value) const;
1670
1671 /**
1672 \brief
1673 Perform the given binary operation on all of the data's elements.
1674 The underlying type of the right hand side (right) determines the final
1675 type of *this after the operation. For example if the right hand side
1676 is expanded *this will be expanded if necessary.
1677 RHS is a Data object.
1678 */
1679 template <class BinaryFunction>
1680 inline
1681 void
1682 binaryOp(const Data& right,
1683 BinaryFunction operation);
1684
1685 /**
1686 \brief
1687 Convert the data type of the RHS to match this.
1688 \param right - Input - data type to match.
1689 */
1690 void
1691 typeMatchLeft(Data& right) const;
1692
1693 /**
1694 \brief
1695 Convert the data type of this to match the RHS.
1696 \param right - Input - data type to match.
1697 */
1698 void
1699 typeMatchRight(const Data& right);
1700
1701 /**
1702 \brief
1703 Construct a Data object of the appropriate type.
1704 */
1705
1706 void
1707 initialise(const DataTypes::ValueType& value,
1708 const DataTypes::ShapeType& shape,
1709 const FunctionSpace& what,
1710 bool expanded);
1711
1712 void
1713 initialise(const WrappedArray& value,
1714 const FunctionSpace& what,
1715 bool expanded);
1716
1717 void
1718 initialise(const double value,
1719 const DataTypes::ShapeType& shape,
1720 const FunctionSpace& what,
1721 bool expanded);
1722
1723 //
1724 // flag to protect the data object against any update
1725 bool m_protected;
1726 mutable bool m_shared;
1727 bool m_lazy;
1728
1729 //
1730 // pointer to the actual data object
1731 // boost::shared_ptr<DataAbstract> m_data;
1732 DataAbstract_ptr m_data;
1733
1734 // If possible please use getReadyPtr instead.
1735 // But see warning below.
1736 const DataReady*
1737 getReady() const;
1738
1739 DataReady*
1740 getReady();
1741
1742
1743 // Be wary of using this for local operations since it (temporarily) increases reference count.
1744 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1745 // getReady() instead
1746 DataReady_ptr
1747 getReadyPtr();
1748
1749 const_DataReady_ptr
1750 getReadyPtr() const;
1751
1752
1753 /**
1754 \brief Update the Data's shared flag
1755 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1756 For internal use only.
1757 */
1758 void updateShareStatus(bool nowshared) const
1759 {
1760 m_shared=nowshared; // m_shared is mutable
1761 }
1762
1763 // In the isShared() method below:
1764 // A problem would occur if m_data (the address pointed to) were being modified
1765 // while the call m_data->is_shared is being executed.
1766 //
1767 // Q: So why do I think this code can be thread safe/correct?
1768 // A: We need to make some assumptions.
1769 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1770 2. We assume that no constructions or assignments which will share previously unshared data
1771 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1772 //
1773 // This means that the only transition we need to consider, is when a previously shared object is
1774 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1775 // In those cases the m_shared flag changes to false after m_data has completed changing.
1776 // For any threads executing before the flag switches they will assume the object is still shared.
1777 bool isShared() const
1778 {
1779 return m_shared;
1780 /* if (m_shared) return true;
1781 if (m_data->isShared())
1782 {
1783 updateShareStatus(true);
1784 return true;
1785 }
1786 return false;*/
1787 }
1788
1789 void forceResolve()
1790 {
1791 if (isLazy())
1792 {
1793 #ifdef _OPENMP
1794 if (omp_in_parallel())
1795 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1796 throw DataException("Please do not call forceResolve() in a parallel region.");
1797 }
1798 #endif
1799 resolve();
1800 }
1801 }
1802
1803 /**
1804 \brief if another object is sharing our member data, make a copy to work with instead.
1805 This code should only be called from single threaded sections of code.
1806 */
1807 void exclusiveWrite()
1808 {
1809 #ifdef _OPENMP
1810 if (omp_in_parallel())
1811 {
1812 // *((int*)0)=17;
1813 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1814 }
1815 #endif
1816 forceResolve();
1817 if (isShared())
1818 {
1819 DataAbstract* t=m_data->deepCopy();
1820 set_m_data(DataAbstract_ptr(t));
1821 }
1822 }
1823
1824 /**
1825 \brief checks if caller can have exclusive write to the object
1826 */
1827 void checkExclusiveWrite()
1828 {
1829 if (isLazy() || isShared())
1830 {
1831 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1832 }
1833 }
1834
1835 /**
1836 \brief Modify the data abstract hosted by this Data object
1837 For internal use only.
1838 Passing a pointer to null is permitted (do this in the destructor)
1839 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1840 */
1841 void set_m_data(DataAbstract_ptr p);
1842
1843 friend class DataAbstract; // To allow calls to updateShareStatus
1844 friend class TestDomain; // so its getX will work quickly
1845 #ifdef IKNOWWHATIMDOING
1846 friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1847 #endif
1848 friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1849 friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed);
1850
1851 };
1852
1853
1854 #ifdef IKNOWWHATIMDOING
1855 ESCRIPT_DLL_API
1856 Data
1857 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1858 #endif
1859
1860
1861 ESCRIPT_DLL_API
1862 Data
1863 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1864
1865
1866
1867 /**
1868 \brief Create a new Expanded Data object filled with (not very) random data.
1869 */
1870 ESCRIPT_DLL_API
1871 Data randomData(const boost::python::tuple& shape,
1872 const FunctionSpace& what,
1873 long seed);
1874
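/*
   Editor's usage sketch (not part of the original header): creating expanded Data filled
   with (not very) random values. boost::python::make_tuple requires an initialised Python
   runtime, and fs is assumed to be an existing FunctionSpace.

   escript::Data noise_example(const escript::FunctionSpace& fs)
   {
       return escript::randomData(boost::python::make_tuple(3), fs, 1234);  // seed = 1234
   }
*/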
1875
1876 } // end namespace escript
1877
1878
1879 // No, this is not supposed to be at the top of the file
1880 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1881 // so that I can dynamic cast between them below.
1882 #include "DataReady.h"
1883 #include "DataLazy.h"
1884
1885 namespace escript
1886 {
1887
1888 inline
1889 const DataReady*
1890 Data::getReady() const
1891 {
1892 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1893 EsysAssert((dr!=0), "Error - casting to DataReady.");
1894 return dr;
1895 }
1896
1897 inline
1898 DataReady*
1899 Data::getReady()
1900 {
1901 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1902 EsysAssert((dr!=0), "Error - casting to DataReady.");
1903 return dr;
1904 }
1905
1906 // Be wary of using this for local operations since it (temporarily) increases reference count.
1907 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1908 // getReady() instead
1909 inline
1910 DataReady_ptr
1911 Data::getReadyPtr()
1912 {
1913 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1914 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1915 return dr;
1916 }
1917
1918
1919 inline
1920 const_DataReady_ptr
1921 Data::getReadyPtr() const
1922 {
1923 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1924 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1925 return dr;
1926 }
1927
1928 inline
1929 DataAbstract::ValueType::value_type*
1930 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1931 {
1932 if (isLazy())
1933 {
1934 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1935 }
1936 return getReady()->getSampleDataRW(sampleNo);
1937 }
1938
1939 inline
1940 const DataAbstract::ValueType::value_type*
1941 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo)
1942 {
1943 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1944 if (l!=0)
1945 {
1946 size_t offset=0;
1947 const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
1948 return &((*res)[offset]);
1949 }
1950 return getReady()->getSampleDataRO(sampleNo);
1951 }
1952
1953
1954
1955 /**
1956 Modify a filename for MPI parallel output to multiple files
1957 */
1958 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1959
1960 /**
1961 Binary Data object operators.
1962 */
1963 inline double rpow(double x,double y)
1964 {
1965 return pow(y,x);
1966 }
1967
1968 /**
1969 \brief
1970 Operator+
1971 Takes two Data objects.
1972 */
1973 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1974
1975 /**
1976 \brief
1977 Operator-
1978 Takes two Data objects.
1979 */
1980 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1981
1982 /**
1983 \brief
1984 Operator*
1985 Takes two Data objects.
1986 */
1987 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1988
1989 /**
1990 \brief
1991 Operator/
1992 Takes two Data objects.
1993 */
1994 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1995
1996 /**
1997 \brief
1998 Operator+
1999 Takes LHS Data object and RHS python::object.
2000 python::object must be convertible to Data type.
2001 */
2002 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2003
2004 /**
2005 \brief
2006 Operator-
2007 Takes LHS Data object and RHS python::object.
2008 python::object must be convertible to Data type.
2009 */
2010 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2011
2012 /**
2013 \brief
2014 Operator*
2015 Takes LHS Data object and RHS python::object.
2016 python::object must be convertible to Data type.
2017 */
2018 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2019
2020 /**
2021 \brief
2022 Operator/
2023 Takes LHS Data object and RHS python::object.
2024 python::object must be convertible to Data type.
2025 */
2026 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2027
2028 /**
2029 \brief
2030 Operator+
2031 Takes LHS python::object and RHS Data object.
2032 python::object must be convertible to Data type.
2033 */
2034 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2035
2036 /**
2037 \brief
2038 Operator-
2039 Takes LHS python::object and RHS Data object.
2040 python::object must be convertible to Data type.
2041 */
2042 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2043
2044 /**
2045 \brief
2046 Operator*
2047 Takes LHS python::object and RHS Data object.
2048 python::object must be convertible to Data type.
2049 */
2050 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2051
2052 /**
2053 \brief
2054 Operator/
2055 Takes LHS python::object and RHS Data object.
2056 python::object must be convertible to Data type.
2057 */
2058 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2059
2060
2061
2062 /**
2063 \brief
2064 Output operator
2065 */
2066 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2067
2068 /**
2069 \brief
2070 Compute a tensor product of two Data objects
2071 \param arg_0 - Input - Data object
2072 \param arg_1 - Input - Data object
2073 \param axis_offset - Input - axis offset
2074 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
2075 */
2076 ESCRIPT_DLL_API
2077 Data
2078 C_GeneralTensorProduct(Data& arg_0,
2079 Data& arg_1,
2080 int axis_offset=0,
2081 int transpose=0);
2082
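/*
   Editor's usage sketch (not part of the original header): a per-data-point matrix-vector
   product via C_GeneralTensorProduct, contracting over one axis. The (3,3) and (3,) shapes
   and the choice axis_offset=1 are assumptions for this example.

   escript::Data matvec_example(escript::Data& A, escript::Data& x)
   {
       // A: shape (3,3) per data point, x: shape (3,) per data point
       return escript::C_GeneralTensorProduct(A, x, 1, 0);  // axis_offset=1, no transpose
   }
*/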
2083 /**
2084 \brief
2085 Operator/
2086 Takes RHS Data object.
2087 */
2088 inline
2089 Data
2090 Data::truedivD(const Data& right)
2091 {
2092 return *this / right;
2093 }
2094
2095 /**
2096 \brief
2097 Operator/
2098 Takes RHS python::object.
2099 */
2100 inline
2101 Data
2102 Data::truedivO(const boost::python::object& right)
2103 {
2104 return *this / right;
2105 }
2106
2107 /**
2108 \brief
2109 Perform the given binary operation with this and right as operands.
2110 Right is a Data object.
2111 */
2112 template <class BinaryFunction>
2113 inline
2114 void
2115 Data::binaryOp(const Data& right,
2116 BinaryFunction operation)
2117 {
2118 //
2119 // updating a rank-zero object with a higher-rank RHS is not supported
2120 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2121 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2122 }
2123
2124 if (isLazy() || right.isLazy())
2125 {
2126 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2127 }
2128 //
2129 // initially make the temporary a shallow copy
2130 Data tempRight(right);
2131
2132 if (getFunctionSpace()!=right.getFunctionSpace()) {
2133 if (right.probeInterpolation(getFunctionSpace())) {
2134 //
2135 // an interpolation is required so create a new Data
2136 tempRight=Data(right,this->getFunctionSpace());
2137 } else if (probeInterpolation(right.getFunctionSpace())) {
2138 //
2139 // interpolate onto the RHS function space
2140 Data tempLeft(*this,right.getFunctionSpace());
2141 // m_data=tempLeft.m_data;
2142 set_m_data(tempLeft.m_data);
2143 }
2144 }
2145 operandCheck(tempRight);
2146 //
2147 // ensure this has the right type for the RHS
2148 typeMatchRight(tempRight);
2149 //
2150 // Need to cast to the concrete types so that the correct binaryOp
2151 // is called.
2152 if (isExpanded()) {
2153 //
2154 // Expanded data will be done in parallel, the right hand side can be
2155 // of any data type
2156 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2157 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2158 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2159 } else if (isTagged()) {
2160 //
2161 // Tagged data is operated on serially, the right hand side can be
2162 // either DataConstant or DataTagged
2163 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2164 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2165 if (right.isTagged()) {
2166 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2167 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2168 escript::binaryOp(*leftC,*rightC,operation);
2169 } else {
2170 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2171 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2172 escript::binaryOp(*leftC,*rightC,operation);
2173 }
2174 } else if (isConstant()) {
2175 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2176 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2177 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2178 escript::binaryOp(*leftC,*rightC,operation);
2179 }
2180 }
2181
2182 /**
2183 \brief
2184 Perform the given Data object reduction algorithm on this and return the result.
2185 Given operation combines each element of each data point, thus argument
2186 object (*this) is a rank n Data object, and returned object is a scalar.
2187 Calls escript::algorithm.
2188 */
2189 template <class BinaryFunction>
2190 inline
2191 double
2192 Data::algorithm(BinaryFunction operation, double initial_value) const
2193 {
2194 if (isExpanded()) {
2195 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2196 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2197 return escript::algorithm(*leftC,operation,initial_value);
2198 } else if (isTagged()) {
2199 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2200 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2201 return escript::algorithm(*leftC,operation,initial_value);
2202 } else if (isConstant()) {
2203 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2204 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2205 return escript::algorithm(*leftC,operation,initial_value);
2206 } else if (isEmpty()) {
2207 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2208 } else if (isLazy()) {
2209 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2210 } else {
2211 throw DataException("Error - Data encapsulates an unknown type.");
2212 }
2213 }
2214
2215 /**
2216 \brief
2217 Perform the given data point reduction algorithm on data and return the result.
2218 Given operation combines each element within each data point into a scalar,
2219 thus argument object is a rank n Data object, and returned object is a
2220 rank 0 Data object.
2221 Calls escript::dp_algorithm.
2222 */
2223 template <class BinaryFunction>
2224 inline
2225 Data
2226 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2227 {
2228 if (isEmpty()) {
2229 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2230 }
2231 else if (isExpanded()) {
2232 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2233 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2234 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2235 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2236 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2237 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2238 return result;
2239 }
2240 else if (isTagged()) {
2241 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2242 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2243 DataTypes::ValueType defval(1);
2244 defval[0]=0;
2245 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2246 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2247 return Data(resultT); // note: the Data object now owns the resultT pointer
2248 }
2249 else if (isConstant()) {
2250 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2251 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2252 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2253 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2254 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2255 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2256 return result;
2257 } else if (isLazy()) {
2258 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2259 } else {
2260 throw DataException("Error - Data encapsulates an unknown type.");
2261 }
2262 }
2263
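// A minimal usage sketch (not part of the interface): summing the components of
// each data point, which yields a rank-0 Data object with one scalar per point.
// "Add" and "d" are hypothetical, as above; per-data-point reductions in this
// class are presumably implemented along these lines.
//
//     struct Add {
//         double operator()(double a, double b) const { return a + b; }
//     };
//
//     Data componentSums = d.dp_algorithm(Add(), 0.0);
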
2264 /**
2265 \brief
2266 Compute a binary tensor operation on two Data objects, element by element; a rank-0 argument is combined with every element of the other.
2267 \param arg_0 - Input - Data object
2268 \param arg_1 - Input - Data object
2269 \param operation - Input - Binary op functor
2270 */
2271 template <typename BinaryFunction>
2272 inline
2273 Data
2274 C_TensorBinaryOperation(Data const &arg_0,
2275 Data const &arg_1,
2276 BinaryFunction operation)
2277 {
2278 if (arg_0.isEmpty() || arg_1.isEmpty())
2279 {
2280 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2281 }
2282 if (arg_0.isLazy() || arg_1.isLazy())
2283 {
2284 throw DataException("Error - Operations not permitted on lazy data.");
2285 }
2286 // Interpolate if necessary and find an appropriate function space
2287 Data arg_0_Z, arg_1_Z;
2288 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2289 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2290 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2291 arg_1_Z = Data(arg_1);
2292 }
2293 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2294 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2295 arg_0_Z =Data(arg_0);
2296 }
2297 else {
2298 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2299 }
2300 } else {
2301 arg_0_Z = Data(arg_0);
2302 arg_1_Z = Data(arg_1);
2303 }
2304 // Get rank and shape of inputs
2305 int rank0 = arg_0_Z.getDataPointRank();
2306 int rank1 = arg_1_Z.getDataPointRank();
2307 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2308 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2309 int size0 = arg_0_Z.getDataPointSize();
2310 int size1 = arg_1_Z.getDataPointSize();
2311 // Declare output Data object
2312 Data res;
2313
2314 if (shape0 == shape1) {
2315 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2316 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2317 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2318 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2319 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2320
2321 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2322 }
2323 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2324
2325 // Prepare the DataConstant input
2326 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2327
2328 // Borrow DataTagged input from Data object
2329 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2330
2331 // Prepare a DataTagged output 2
2332 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2333 res.tag();
2334 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2335
2336 // Prepare offset into DataConstant
2337 int offset_0 = tmp_0->getPointOffset(0,0);
2338 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2339
2340 // Get the pointers to the actual data
2341 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2342 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2343
2344 // Compute a result for the default
2345 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2346 // Compute a result for each tag
2347 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2348 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2349 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2350 tmp_2->addTag(i->first);
2351 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2352 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2353
2354 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2355 }
2356
2357 }
2358 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2359 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2360 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2361 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2362 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2363
2364 int sampleNo_1,dataPointNo_1;
2365 int numSamples_1 = arg_1_Z.getNumSamples();
2366 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2367 int offset_0 = tmp_0->getPointOffset(0,0);
2368 res.requireWrite();
2369 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2370 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2371 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2372 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2373 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2374 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2375 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2376 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2377 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2378 }
2379 }
2380
2381 }
2382 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2383 // Borrow DataTagged input from Data object
2384 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2385
2386 // Prepare the DataConstant input
2387 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2388
2389 // Prepare a DataTagged output 2
2390 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2391 res.tag();
2392 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2393
2394 // Prepare offset into DataConstant
2395 int offset_1 = tmp_1->getPointOffset(0,0);
2396
2397 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2398 // Get the pointers to the actual data
2399 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2400 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2401 // Compute a result for the default
2402 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2403 // Compute a result for each tag
2404 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2405 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2406 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2407 tmp_2->addTag(i->first);
2408 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2409 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2410 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2411 }
2412
2413 }
2414 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2415 // Borrow DataTagged input from Data object
2416 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2417
2418 // Borrow DataTagged input from Data object
2419 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2420
2421 // Prepare a DataTagged output 2
2422 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2423 res.tag(); // DataTagged output
2424 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2425
2426 // Get the pointers to the actual data
2427 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2428 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2429 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2430
2431 // Compute a result for the default
2432 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2433 // Merge the tags
2434 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2435 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2436 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2437 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2438 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2439 }
2440 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2441 tmp_2->addTag(i->first);
2442 }
2443 // Compute a result for each tag
2444 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2445 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2446
2447 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2448 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2449 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2450
2451 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2452 }
2453
2454 }
2455 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2456 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2457 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2458 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2459 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2460 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2461
2462 int sampleNo_0,dataPointNo_0;
2463 int numSamples_0 = arg_0_Z.getNumSamples();
2464 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2465 res.requireWrite();
2466 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2467 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2468 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2469 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2470 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2471 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2472 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2473 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2474 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2475 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2476 }
2477 }
2478
2479 }
2480 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2481 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2482 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2483 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2484 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2485
2486 int sampleNo_0,dataPointNo_0;
2487 int numSamples_0 = arg_0_Z.getNumSamples();
2488 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2489 int offset_1 = tmp_1->getPointOffset(0,0);
2490 res.requireWrite();
2491 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2492 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2493 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2494 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2495 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2496
2497 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2498 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2499 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2500
2501
2502 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2503 }
2504 }
2505
2506 }
2507 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2508 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2509 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2510 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2511 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2512 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2513
2514 int sampleNo_0,dataPointNo_0;
2515 int numSamples_0 = arg_0_Z.getNumSamples();
2516 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2517 res.requireWrite();
2518 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2519 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2520 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2521 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2522 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2523 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2524 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2525 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2526 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2527 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2528 }
2529 }
2530
2531 }
2532 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2533 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2534 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2535 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2536 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2537 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2538
2539 int sampleNo_0,dataPointNo_0;
2540 int numSamples_0 = arg_0_Z.getNumSamples();
2541 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2542 res.requireWrite();
2543 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2544 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2545 dataPointNo_0=0;
2546 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2547 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2548 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2549 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2550 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2551 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2552 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2553 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2554 // }
2555 }
2556
2557 }
2558 else {
2559 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2560 }
2561
2562 } else if (0 == rank0) {
2563 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2564 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2565 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2566 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2567 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2568 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2569 }
2570 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2571
2572 // Prepare the DataConstant input
2573 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2574
2575 // Borrow DataTagged input from Data object
2576 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2577
2578 // Prepare a DataTagged output 2
2579 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2580 res.tag();
2581 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2582
2583 // Prepare offset into DataConstant
2584 int offset_0 = tmp_0->getPointOffset(0,0);
2585 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2586
2587 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2588 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2589
2590 // Compute a result for the default
2591 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2592 // Compute a result for each tag
2593 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2594 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2595 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2596 tmp_2->addTag(i->first);
2597 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2598 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2599 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2600 }
2601
2602 }
2603 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2604
2605 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2606 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2607 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2608 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2609
2610 int sampleNo_1;
2611 int numSamples_1 = arg_1_Z.getNumSamples();
2612 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2613 int offset_0 = tmp_0->getPointOffset(0,0);
2614 const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2615 double ptr_0 = ptr_src[0]; // the rank-0 argument is a single value (not a pointer, despite the name)
2616 int size = size1*numDataPointsPerSample_1;
2617 res.requireWrite();
2618 #pragma omp parallel for private(sampleNo_1) schedule(static)
2619 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2620 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2621 int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2622 int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2623 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2624 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2625 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2626 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2627
2628 // }
2629 }
2630
2631 }
2632 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2633
2634 // Borrow DataTagged input from Data object
2635 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2636
2637 // Prepare the DataConstant input
2638 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2639
2640 // Prepare a DataTagged output 2
2641 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2642 res.tag();
2643 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2644
2645 // Prepare offset into DataConstant
2646 int offset_1 = tmp_1->getPointOffset(0,0);
2647 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2648
2649 // Get the pointers to the actual data
2650 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2651 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2652
2653
2654 // Compute a result for the default
2655 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2656 // Compute a result for each tag
2657 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2658 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2659 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2660 tmp_2->addTag(i->first);
2661 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2662 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2663
2664 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2665 }
2666
2667 }
2668 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2669
2670 // Borrow DataTagged input from Data object
2671 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2672
2673 // Borrow DataTagged input from Data object
2674 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2675
2676 // Prepare a DataTagged output 2
2677 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2678 res.tag(); // DataTagged output
2679 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2680
2681 // Get the pointers to the actual data
2682 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2683 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2684 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2685
2686 // Compute a result for the default
2687 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2688 // Merge the tags
2689 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2690 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2691 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2692 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2693 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2694 }
2695 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2696 tmp_2->addTag(i->first);
2697 }
2698 // Compute a result for each tag
2699 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2700 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2701 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2702 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2703 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2704
2705 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2706 }
2707
2708 }
2709 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2710
2711 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2712 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2713 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2714 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2715 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2716
2717 int sampleNo_0,dataPointNo_0;
2718 int numSamples_0 = arg_0_Z.getNumSamples();
2719 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2720 res.requireWrite();
2721 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2722 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2723 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2724 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2725 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2726 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2727 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2728 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2729 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2730 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2731 }
2732 }
2733
2734 }
2735 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2736 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2737 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2738 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2739 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2740
2741 int sampleNo_0,dataPointNo_0;
2742 int numSamples_0 = arg_0_Z.getNumSamples();
2743 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2744 int offset_1 = tmp_1->getPointOffset(0,0);
2745 res.requireWrite();
2746 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2747 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2748 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2749 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2750 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2751 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2752 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2753 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2754 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2755 }
2756 }
2757
2758
2759 }
2760 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2761
2762 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2763 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2764 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2765 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2766 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2767
2768 int sampleNo_0,dataPointNo_0;
2769 int numSamples_0 = arg_0_Z.getNumSamples();
2770 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2771 res.requireWrite();
2772 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2773 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2774 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2775 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2776 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2777 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2778 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2779 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2780 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2781 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2782 }
2783 }
2784
2785 }
2786 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2787
2788 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2789 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2790 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2791 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2792 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2793
2794 int sampleNo_0,dataPointNo_0;
2795 int numSamples_0 = arg_0_Z.getNumSamples();
2796 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2797 res.requireWrite();
2798 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2799 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2800 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2801 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2802 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2803 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2804 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2805 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2806 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2807 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2808 }
2809 }
2810
2811 }
2812 else {
2813 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2814 }
2815
2816 } else if (0 == rank1) {
2817 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2818 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2819 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2820 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2821 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2822 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2823 }
2824 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2825
2826 // Prepare the DataConstant input
2827 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2828
2829 // Borrow DataTagged input from Data object
2830 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2831
2832 // Prepare a DataTagged output 2
2833 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2834 res.tag();
2835 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2836
2837 // Prepare offset into DataConstant
2838 int offset_0 = tmp_0->getPointOffset(0,0);
2839 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2840
2841 //Get the pointers to the actual data
2842 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2843 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2844
2845 // Compute a result for the default
2846 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2847 // Compute a result for each tag
2848 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2849 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2850 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2851 tmp_2->addTag(i->first);
2852 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2853 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2854 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2855 }
2856 }
2857 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2858
2859 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2860 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2861 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2862 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2863
2864 int sampleNo_1,dataPointNo_1;
2865 int numSamples_1 = arg_1_Z.getNumSamples();
2866 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2867 int offset_0 = tmp_0->getPointOffset(0,0);
2868 res.requireWrite();
2869 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2870 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2871 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2872 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2873 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2874 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2875 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2876 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2877 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2878 }
2879 }
2880
2881 }
2882 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2883
2884 // Borrow DataTagged input from Data object
2885 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2886
2887 // Prepare the DataConstant input
2888 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2889
2890 // Prepare a DataTagged output 2
2891 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2892 res.tag();
2893 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2894
2895 // Prepare offset into DataConstant
2896 int offset_1 = tmp_1->getPointOffset(0,0);
2897 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2898 // Get the pointers to the actual data
2899 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2900 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2901 // Compute a result for the default
2902 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2903 // Compute a result for each tag
2904 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2905 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2906 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2907 tmp_2->addTag(i->first);
2908 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2909 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2910 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2911 }
2912
2913 }
2914 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2915
2916 // Borrow DataTagged input from Data object
2917 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2918
2919 // Borrow DataTagged input from Data object
2920 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2921
2922 // Prepare a DataTagged output 2
2923 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2924 res.tag(); // DataTagged output
2925 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2926
2927 // Get the pointers to the actual data
2928 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2929 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2930 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2931
2932 // Compute a result for the default
2933 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2934 // Merge the tags
2935 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2936 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2937 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2938 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2939 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2940 }
2941 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2942 tmp_2->addTag(i->first);
2943 }
2944 // Compute a result for each tag
2945 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2946 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2947 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2948 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2949 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2950 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2951 }
2952
2953 }
2954 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2955
2956 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
2957 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2958 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2959 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2960 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2961
2962 int sampleNo_0,dataPointNo_0;
2963 int numSamples_0 = arg_0_Z.getNumSamples();
2964 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2965 res.requireWrite();
2966 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2967 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2968 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2969 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2970 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2971 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2972 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2973 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2974 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2975 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2976 }
2977 }
2978
2979 }
2980 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2981 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2982 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2983 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2984 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2985
2986 int sampleNo_0;
2987 int numSamples_0 = arg_0_Z.getNumSamples();
2988 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2989 int offset_1 = tmp_1->getPointOffset(0,0);
2990 const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2991 double ptr_1 = ptr_src[0]; // the rank-0 argument is a single value (not a pointer, despite the name)
2992 int size = size0 * numDataPointsPerSample_0;
2993 res.requireWrite();
2994 #pragma omp parallel for private(sampleNo_0) schedule(static)
2995 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2996 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2997 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
2998 int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
2999 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3000 // const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3001 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3002 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3003 // }
3004 }
3005
3006
3007 }
3008 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3009
3010 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
3011 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3012 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3013 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3014 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3015
3016 int sampleNo_0,dataPointNo_0;
3017 int numSamples_0 = arg_0_Z.getNumSamples();
3018 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3019 res.requireWrite();
3020 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3021 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3022 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3023 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3024 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3025 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3026 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3027 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3028 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3029 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3030 }
3031 }
3032
3033 }
3034 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3035
3036 // After finding a common function space above, the two inputs have the same numSamples and number of data points per sample
3037 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3038 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3039 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3040 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3041
3042 int sampleNo_0,dataPointNo_0;
3043 int numSamples_0 = arg_0_Z.getNumSamples();
3044 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3045 res.requireWrite();
3046 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3047 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3048 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3049 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3050 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3051 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3052 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3053 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3054 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3055 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3056 }
3057 }
3058
3059 }
3060 else {
3061 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3062 }
3063
3064 } else {
3065 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3066 }
3067
3068 return res;
3069 }
3070
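// A usage sketch for C_TensorBinaryOperation: any functor or function pointer
// taking two doubles and returning a double will do, e.g. std::plus<double>
// from <functional>. "u" and "v" are hypothetical Data objects; when their
// function spaces differ, one argument is interpolated onto the other where
// probeInterpolation() allows it, otherwise a DataException is thrown.
//
//     Data w = C_TensorBinaryOperation(u, v, std::plus<double>());

/**
\brief
Compute a tensor operation with one Data object, applying the given unary
functor to every element of every data point.
\param arg_0 - Input - Data object
\param operation - Input - Unary op functor
*/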
3071 template <typename UnaryFunction>
3072 Data
3073 C_TensorUnaryOperation(Data const &arg_0,
3074 UnaryFunction operation)
3075 {
3076 if (arg_0.isEmpty()) // do this before we attempt to interpolate
3077 {
3078 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
3079 }
3080 if (arg_0.isLazy())
3081 {
3082 throw DataException("Error - Operations not permitted on lazy data.");
3083 }
3084 // Interpolate if necessary and find an appropriate function space
3085 Data arg_0_Z = Data(arg_0);
3086
3087 // Get rank and shape of inputs
3088 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3089 int size0 = arg_0_Z.getDataPointSize();
3090
3091 // Declare output Data object
3092 Data res;
3093
3094 if (arg_0_Z.isConstant()) {
3095 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3096 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3097 double *ptr_2 = &(res.getDataAtOffsetRW(0));
3098 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3099 }
3100 else if (arg_0_Z.isTagged()) {
3101
3102 // Borrow DataTagged input from Data object
3103 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3104
3105 // Prepare a DataTagged output 2
3106 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3107 res.tag();
3108 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3109
3110 // Get the pointers to the actual data
3111 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3112 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3113 // Compute a result for the default
3114 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3115 // Compute a result for each tag
3116 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3117 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3118 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3119 tmp_2->addTag(i->first);
3120 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3121 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3122 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3123 }
3124
3125 }
3126 else if (arg_0_Z.isExpanded()) {
3127
3128 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3129 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3130 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3131
3132 int sampleNo_0,dataPointNo_0;
3133 int numSamples_0 = arg_0_Z.getNumSamples();
3134 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3135 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3136 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3137 dataPointNo_0=0;
3138 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3139 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3140 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3141 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3142 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3143 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3144 // }
3145 }
3146 }
3147 else {
3148 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3149 }
3150
3151 return res;
3152 }
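
// A usage sketch for C_TensorUnaryOperation: the functor receives and returns a
// double, so std::negate<double> from <functional> or a plain function pointer
// matching Data::UnaryDFunPtr both qualify. "u" is a hypothetical Data object.
//
//     Data minusU = C_TensorUnaryOperation(u, std::negate<double>());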
3153
3154 }
3155 #endif
