

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2015 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development 2012-2013 by School of Earth Sciences
13 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 *
15 *****************************************************************************/
16
17
18 /** \file Data.h */
19
20 #ifndef DATA_H
21 #define DATA_H
22 #include "system_dep.h"
23
24 #include "DataTypes.h"
25 #include "DataAbstract.h"
26 #include "DataAlgorithm.h"
27 #include "FunctionSpace.h"
28 #include "BinaryOp.h"
29 #include "UnaryOp.h"
30 #include "DataException.h"
31
32 #ifdef _OPENMP
33 #include <omp.h>
34 #endif
35
36 #include "esysUtils/Esys_MPI.h"
37 #include <string>
38 #include <algorithm>
39 #include <sstream>
40
41 #include <boost/shared_ptr.hpp>
42 #include <boost/python/object.hpp>
43 #include <boost/python/tuple.hpp>
44
45 namespace escript {
46
47 //
48 // Forward declaration for various implementations of Data.
49 class DataConstant;
50 class DataTagged;
51 class DataExpanded;
52 class DataLazy;
53
54 /**
55 \brief
56 Data represents a collection of datapoints.
57
58 Description:
59 Internally, the datapoints are actually stored by a DataAbstract object.
60 The specific instance of DataAbstract used may vary over the lifetime
61 of the Data object.
62 Some methods on this class return references (eg getShape()).
63 These references should not be used after an operation which changes the underlying DataAbstract object.
64 Doing so will lead to invalid memory access.
65 This should not affect any methods exposed via boost::python.
66 */
67 class Data {
68
69 public:
70
71 // These typedefs allow function names to be cast to pointers
72 // to functions of the appropriate type when calling unaryOp etc.
73 typedef double (*UnaryDFunPtr)(double);
74 typedef double (*BinaryDFunPtr)(double,double);
75
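// Usage sketch (editor's illustration, not part of the original API docs).
// Assuming `fs` is a FunctionSpace obtained from some escript domain:
//
//   escript::Data u(1.5, escript::DataTypes::ShapeType(), fs, false); // rank-0, compact storage
//   escript::Data v(u, fs);            // copy / interpolate onto fs
//   u += v;                            // element-wise update (see operator+= below)
//   double m = u.Lsup();               // maximum absolute value over all data points
//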
76
77 /**
78 Constructors.
79 */
80
81 /**
82 \brief
83 Default constructor.
84 Creates a DataEmpty object.
85 */
86 ESCRIPT_DLL_API
87 Data();
88
89 /**
90 \brief
91 Copy constructor.
92 WARNING: Only performs a shallow copy.
93 */
94 ESCRIPT_DLL_API
95 Data(const Data& inData);
96
97 /**
98 \brief
99 Constructor from another Data object. If "what" differs from the
100 function space of inData, an attempt is made to interpolate inData to "what";
101 otherwise a shallow copy of inData is returned.
102 */
103 ESCRIPT_DLL_API
104 Data(const Data& inData,
105 const FunctionSpace& what);
106
107 /**
108 \brief Copy Data from an existing vector
109 */
110
111 ESCRIPT_DLL_API
112 Data(const DataTypes::ValueType& value,
113 const DataTypes::ShapeType& shape,
114 const FunctionSpace& what=FunctionSpace(),
115 bool expanded=false);
116
117 /**
118 \brief
119 Constructor which creates a Data with points having the specified shape.
120
121 \param value - Input - Single value applied to all Data.
122 \param dataPointShape - Input - The shape of each data point.
123 \param what - Input - A description of what this data represents.
124 \param expanded - Input - Flag, if true fill the entire container with
125 the given value. Otherwise a more efficient storage
126 mechanism will be used.
127 */
128 ESCRIPT_DLL_API
129 Data(double value,
130 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
131 const FunctionSpace& what=FunctionSpace(),
132 bool expanded=false);
133
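// Illustration (assumed typical use): with expanded=false the single value is
// stored once (typically as a DataConstant); with expanded=true every data point
// gets its own copy.
//
//   escript::Data c(2.0, escript::DataTypes::ShapeType(), fs, false); // compact storage
//   escript::Data e(2.0, escript::DataTypes::ShapeType(), fs, true);  // one value per data point
//   c.expand();                                                       // convert c to expanded storage
//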
134 /**
135 \brief
136 Constructor which performs a deep copy of a region from another Data object.
137
138 \param inData - Input - Input Data object.
139 \param region - Input - Region to copy.
140 */
141 ESCRIPT_DLL_API
142 Data(const Data& inData,
143 const DataTypes::RegionType& region);
144
145 /**
146 \brief
147 Constructor which copies data from any object that can be treated like a python array/sequence.
148
149 \param value - Input - Input data.
150 \param what - Input - A description of what this data represents.
151 \param expanded - Input - Flag, if true fill the entire container with
152 the value. Otherwise a more efficient storage
153 mechanism will be used.
154 */
155 ESCRIPT_DLL_API
156 Data(const boost::python::object& value,
157 const FunctionSpace& what=FunctionSpace(),
158 bool expanded=false);
159
160 /**
161 \brief
162 Constructor which copies data from a wrapped array.
163
164 \param w - Input - Input data.
165 \param what - Input - A description of what this data represents.
166 \param expanded - Input - Flag, if true fill the entire container with
167 the value. Otherwise a more efficient storage
168 mechanism will be used.
169 */
170 ESCRIPT_DLL_API
171 Data(const WrappedArray& w, const FunctionSpace& what,
172 bool expanded=false);
173
174
175 /**
176 \brief
177 Constructor which creates a DataConstant.
178 Copies data from any object that can be treated like a python array/sequence.
179 All other parameters are copied from other.
180
181 \param value - Input - Input data.
182 \param other - Input - contains all other parameters.
183 */
184 ESCRIPT_DLL_API
185 Data(const boost::python::object& value,
186 const Data& other);
187
188 /**
189 \brief
190 Constructor which creates a DataConstant of "shape" with constant value.
191 */
192 ESCRIPT_DLL_API
193 Data(double value,
194 const boost::python::tuple& shape=boost::python::make_tuple(),
195 const FunctionSpace& what=FunctionSpace(),
196 bool expanded=false);
197
198 /**
199 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
200 Once you have passed the pointer, do not delete it.
201 */
202 ESCRIPT_DLL_API
203 explicit Data(DataAbstract* underlyingdata);
204
205 /**
206 \brief Create a Data based on the supplied DataAbstract
207 */
208 ESCRIPT_DLL_API
209 explicit Data(DataAbstract_ptr underlyingdata);
210
211 /**
212 \brief
213 Destructor
214 */
215 ESCRIPT_DLL_API
216 ~Data();
217
218 /**
219 \brief Make this object a deep copy of "other".
220 */
221 ESCRIPT_DLL_API
222 void
223 copy(const Data& other);
224
225 /**
226 \brief Return a pointer to a deep copy of this object.
227 */
228 ESCRIPT_DLL_API
229 Data
230 copySelf();
231
232
233 /**
234 \brief produce a delayed evaluation version of this Data.
235 */
236 ESCRIPT_DLL_API
237 Data
238 delay();
239
240 /**
241 \brief convert the current data into lazy data.
242 */
243 ESCRIPT_DLL_API
244 void
245 delaySelf();
246
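// Sketch of the lazy-evaluation round trip (illustrative):
//
//   escript::Data lz = u.delay();   // deferred-evaluation wrapper around u
//   u.delaySelf();                  // or: convert u itself to lazy data
//   u.resolve();                    // force evaluation back to ready data (see resolve() below)
//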
247
248 /**
249 Member access methods.
250 */
251
252 /**
253 \brief
254 switches on update protection
255
256 */
257 ESCRIPT_DLL_API
258 void
259 setProtection();
260
261 /**
262 \brief
263 Returns true if the data object is protected against updates.
264
265 */
266 ESCRIPT_DLL_API
267 bool
268 isProtected() const;
269
270
271 /**
272 \brief
273 Return the value of a data point as a python tuple.
274 */
275 ESCRIPT_DLL_API
276 const boost::python::object
277 getValueOfDataPointAsTuple(int dataPointNo);
278
279 /**
280 \brief
281 sets the values of a data-point from a python object on this process
282 */
283 ESCRIPT_DLL_API
284 void
285 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
286
287 /**
288 \brief
289 sets the values of a data-point from an array-like object on this process
290 */
291 ESCRIPT_DLL_API
292 void
293 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
294
295 /**
296 \brief
297 sets the values of a data-point on this process
298 */
299 ESCRIPT_DLL_API
300 void
301 setValueOfDataPoint(int dataPointNo, const double);
302
303 /**
304 \brief Return a data point across all processors as a python tuple.
305 */
306 ESCRIPT_DLL_API
307 const boost::python::object
308 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
309
310
311 /**
312 \brief Set the value of a global data point
313 */
314 ESCRIPT_DLL_API
315 void
316 setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
317
318 /**
319 \brief
320 Return the tag number associated with the given data-point.
321
322 */
323 ESCRIPT_DLL_API
324 int
325 getTagNumber(int dpno);
326
327
328 /**
329 \brief
330 Write the data as a string. For large amounts of data, a summary is printed.
331 */
332 ESCRIPT_DLL_API
333 std::string
334 toString() const;
335
336 /**
337 \brief
338 Whatever the current Data type, convert this into a DataExpanded.
339 */
340 ESCRIPT_DLL_API
341 void
342 expand();
343
344 /**
345 \brief
346 If possible convert this Data to DataTagged. This will only allow
347 Constant data to be converted to tagged. An attempt to convert
348 Expanded data to tagged will throw an exception.
349 */
350 ESCRIPT_DLL_API
351 void
352 tag();
353
354 /**
355 \brief If this data is lazy, then convert it to ready data.
356 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
357 */
358 ESCRIPT_DLL_API
359 void
360 resolve();
361
362 /**
363 \brief returns true if the data contains NaN.
364 \warning This is dependent on the ability to reliably detect NaNs on your compiler.
365 See the nancheck function in LocalOps for details.
366 */
367 ESCRIPT_DLL_API
368 bool
369 hasNaN();
370
371 /**
372 \brief replaces all NaN values with value
373 */
374 ESCRIPT_DLL_API
375 void
376 replaceNaN(double value);
377
378 /**
379 \brief Ensures data is ready for write access.
380 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
381 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
382 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
383 Doing so might introduce additional sharing.
384 */
385 ESCRIPT_DLL_API
386 void
387 requireWrite();
388
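// Intended access pattern (sketch based on the warnings above; the loop body is illustrative):
//
//   u.requireWrite();                          // single-threaded: resolve and un-share first
//   #pragma omp parallel for
//   for (int s = 0; s < u.getNumSamples(); ++s)
//   {
//       double* p = u.getSampleDataRW(s);      // safe only after requireWrite()
//       // ... modify the sample's values via p ...
//   }
//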
389 /**
390 \brief
391 Return true if this Data is expanded.
392 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
393 */
394 ESCRIPT_DLL_API
395 bool
396 isExpanded() const;
397
398 /**
399 \brief
400 Return true if this Data is expanded or resolves to expanded.
401 That is, if it has a separate value for each datapoint in the sample.
402 */
403 ESCRIPT_DLL_API
404 bool
405 actsExpanded() const;
406
407
408 /**
409 \brief
410 Return true if this Data is tagged.
411 */
412 ESCRIPT_DLL_API
413 bool
414 isTagged() const;
415
416 /**
417 \brief
418 Return true if this Data is constant.
419 */
420 ESCRIPT_DLL_API
421 bool
422 isConstant() const;
423
424 /**
425 \brief Return true if this Data is lazy.
426 */
427 ESCRIPT_DLL_API
428 bool
429 isLazy() const;
430
431 /**
432 \brief Return true if this data is ready.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isReady() const;
437
438 /**
439 \brief
440 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
441 contains datapoints.
442 */
443 ESCRIPT_DLL_API
444 bool
445 isEmpty() const;
446
447 /**
448 \brief
449 Return the function space.
450 */
451 ESCRIPT_DLL_API
452 inline
453 const FunctionSpace&
454 getFunctionSpace() const
455 {
456 return m_data->getFunctionSpace();
457 }
458
459 /**
460 \brief
461 Return the domain.
462 */
463 ESCRIPT_DLL_API
464 inline
465 // const AbstractDomain&
466 const_Domain_ptr
467 getDomain() const
468 {
469 return getFunctionSpace().getDomain();
470 }
471
472
473 /**
474 \brief
475 Return the domain.
476 TODO: For internal use only. This should be removed.
477 */
478 ESCRIPT_DLL_API
479 inline
480 // const AbstractDomain&
481 Domain_ptr
482 getDomainPython() const
483 {
484 return getFunctionSpace().getDomainPython();
485 }
486
487 /**
488 \brief
489 Return the rank of the point data.
490 */
491 ESCRIPT_DLL_API
492 inline
493 unsigned int
494 getDataPointRank() const
495 {
496 return m_data->getRank();
497 }
498
499 /**
500 \brief
501 Return the number of data points
502 */
503 ESCRIPT_DLL_API
504 inline
505 int
506 getNumDataPoints() const
507 {
508 return getNumSamples() * getNumDataPointsPerSample();
509 }
510 /**
511 \brief
512 Return the number of samples.
513 */
514 ESCRIPT_DLL_API
515 inline
516 int
517 getNumSamples() const
518 {
519 return m_data->getNumSamples();
520 }
521
522 /**
523 \brief
524 Return the number of data points per sample.
525 */
526 ESCRIPT_DLL_API
527 inline
528 int
529 getNumDataPointsPerSample() const
530 {
531 return m_data->getNumDPPSample();
532 }
533
534 /**
535 \brief
536 Returns true if the number of data points per sample and the number of
537 samples match the respective argument. DataEmpty always returns true.
538 */
539 ESCRIPT_DLL_API
540 inline
541 bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
542 {
543 return (isEmpty() ||
544 (numDataPointsPerSample==getNumDataPointsPerSample() && numSamples==getNumSamples()));
545 }
546
547 /**
548 \brief
549 Returns true if the shape matches the vector (dimensions[0],...,
550 dimensions[rank-1]). DataEmpty always returns true.
551 */
552 inline
553 bool isDataPointShapeEqual(int rank, const int* dimensions) const
554 {
555 if (isEmpty())
556 return true;
557 const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
558 return (getDataPointShape()==givenShape);
559 }
560
561 /**
562 \brief
563 Return the number of values in the shape for this object.
564 */
565 ESCRIPT_DLL_API
566 int
567 getNoValues() const
568 {
569 return m_data->getNoValues();
570 }
571
572
573 /**
574 \brief
575 dumps the object into a netCDF file
576 */
577 ESCRIPT_DLL_API
578 void
579 dump(const std::string fileName) const;
580
581 /**
582 \brief returns the values of the object as a list of tuples (one for each datapoint).
583
584 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
585 If false, the result is a list of scalars [1, 2, ...]
586 */
587 ESCRIPT_DLL_API
588 const boost::python::object
589 toListOfTuples(bool scalarastuple=true);
590
591
592 /**
593 \brief
594 Return the sample data for the given sample no.
595 Please do not use this unless you NEED to access samples individually
596 \param sampleNo - Input - the given sample no.
597 \return pointer to the sample data.
598 */
599 ESCRIPT_DLL_API
600 inline
601 const DataAbstract::ValueType::value_type*
602 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const;
603
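// Read-only counterpart (sketch): no requireWrite() is needed, and lazy data is
// resolved sample by sample (see the inline definition near the end of this file).
//
//   for (int s = 0; s < u.getNumSamples(); ++s)
//   {
//       const double* p = u.getSampleDataRO(s);
//       // ... read the sample's values via p ...
//   }
//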
604
605 /**
606 \brief
607 Return the sample data for the given sample no.
608 Please do not use this unless you NEED to access samples individually
609 \param sampleNo - Input - the given sample no.
610 \return pointer to the sample data.
611 */
612 ESCRIPT_DLL_API
613 inline
614 DataAbstract::ValueType::value_type*
615 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
616
617
618 /**
619 \brief
620 Return a pointer to the beginning of the underlying data
621 \warning please avoid using this method since it by-passes possible lazy improvements. May be removed without notice.
622 \return pointer to the data.
623 */
624 ESCRIPT_DLL_API
625 inline
626 const DataAbstract::ValueType::value_type*
627 getDataRO() const;
628
629 /**
630 \brief
631 Return the sample data for the given tag. If an attempt is made to
632 access data that isn't tagged an exception will be thrown.
633 \param tag - Input - the tag key.
634 */
635 ESCRIPT_DLL_API
636 inline
637 DataAbstract::ValueType::value_type*
638 getSampleDataByTag(int tag)
639 {
640 return m_data->getSampleDataByTag(tag);
641 }
642
643 /**
644 \brief
645 Return a reference into the DataVector which points to the specified data point.
646 \param sampleNo - Input -
647 \param dataPointNo - Input -
648 */
649 ESCRIPT_DLL_API
650 DataTypes::ValueType::const_reference
651 getDataPointRO(int sampleNo, int dataPointNo);
652
653 /**
654 \brief
655 Return a reference into the DataVector which points to the specified data point.
656 \param sampleNo - Input -
657 \param dataPointNo - Input -
658 */
659 ESCRIPT_DLL_API
660 DataTypes::ValueType::reference
661 getDataPointRW(int sampleNo, int dataPointNo);
662
663
664
665 /**
666 \brief
667 Return the offset for the given sample and point within the sample
668 */
669 ESCRIPT_DLL_API
670 inline
671 DataTypes::ValueType::size_type
672 getDataOffset(int sampleNo,
673 int dataPointNo)
674 {
675 return m_data->getPointOffset(sampleNo,dataPointNo);
676 }
677
678 /**
679 \brief
680 Return a reference to the data point shape.
681 */
682 ESCRIPT_DLL_API
683 inline
684 const DataTypes::ShapeType&
685 getDataPointShape() const
686 {
687 return m_data->getShape();
688 }
689
690 /**
691 \brief
692 Return the data point shape as a tuple of integers.
693 */
694 ESCRIPT_DLL_API
695 const boost::python::tuple
696 getShapeTuple() const;
697
698 /**
699 \brief
700 Return the size of the data point. It is the product of the
701 data point shape dimensions.
702 */
703 ESCRIPT_DLL_API
704 int
705 getDataPointSize() const;
706
707 /**
708 \brief
709 Return the number of doubles stored for this Data.
710 */
711 ESCRIPT_DLL_API
712 DataTypes::ValueType::size_type
713 getLength() const;
714
715 /**
716 \brief Return true if this object contains no samples.
717 This is not the same as isEmpty()
718 */
719 ESCRIPT_DLL_API
720 bool
721 hasNoSamples() const
722 {
723 return getLength()==0;
724 }
725
726 /**
727 \brief
728 Assign the given value to the tag associated with name. Implicitly converts this
729 object to type DataTagged. Throws an exception if this object
730 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
731 \param name - Input - name of tag.
732 \param value - Input - Value to associate with given key.
733 */
734 ESCRIPT_DLL_API
735 void
736 setTaggedValueByName(std::string name,
737 const boost::python::object& value);
738
739 /**
740 \brief
741 Assign the given value to the tag. Implicitly converts this
742 object to type DataTagged if it is constant.
743
744 \param tagKey - Input - Integer key.
745 \param value - Input - Value to associate with given key.
746
747 */
748 ESCRIPT_DLL_API
749 void
750 setTaggedValue(int tagKey,
751 const boost::python::object& value);
752
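// Typical tagging sequence (illustrative; `py_value` and `py_other` are hypothetical
// python objects of the appropriate shape):
//
//   u.tag();                         // constant -> tagged
//   u.setTaggedValue(1, py_value);   // samples carrying tag 1 use this value
//   u.setTaggedValue(2, py_other);   // samples carrying tag 2 use this one
//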
753 /**
754 \brief
755 Assign the given value to the tag. Implicitly converts this
756 object to type DataTagged if it is constant.
757
758 \param tagKey - Input - Integer key.
759 \param pointshape - Input - The shape of the value parameter
760 \param value - Input - Value to associate with given key.
761 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
762 */
763 ESCRIPT_DLL_API
764 void
765 setTaggedValueFromCPP(int tagKey,
766 const DataTypes::ShapeType& pointshape,
767 const DataTypes::ValueType& value,
768 int dataOffset=0);
769
770
771
772 /**
773 \brief
774 Copy other Data object into this Data object where mask is positive.
775 */
776 ESCRIPT_DLL_API
777 void
778 copyWithMask(const Data& other,
779 const Data& mask);
780
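// Sketch: conditional assignment via a 0/1 mask (see wherePositive() below):
//
//   escript::Data mask = x.wherePositive();   // 1 where x > 0, else 0
//   u.copyWithMask(v, mask);                  // u takes v's values where mask is positive
//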
781 /**
782 Data object operation methods and operators.
783 */
784
785 /**
786 \brief
787 set all values to zero
788 *
789 */
790 ESCRIPT_DLL_API
791 void
792 setToZero();
793
794 /**
795 \brief
796 Interpolates this onto the given functionspace and returns
797 the result as a Data object.
798 *
799 */
800 ESCRIPT_DLL_API
801 Data
802 interpolate(const FunctionSpace& functionspace) const;
803
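// Sketch (fs2 is a hypothetical second FunctionSpace on the same domain):
//
//   if (u.probeInterpolation(fs2))            // declared further below
//   {
//       escript::Data u2 = u.interpolate(fs2);
//   }
//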
804 ESCRIPT_DLL_API
805 Data
806 interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
807 double undef, Data& B, double Bmin, double Bstep, Data& C,
808 double Cmin, double Cstep, bool check_boundaries);
809
810 ESCRIPT_DLL_API
811 Data
812 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
813 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
814
815 ESCRIPT_DLL_API
816 Data
817 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
818 double undef,bool check_boundaries);
819
820
821 ESCRIPT_DLL_API
822 Data
823 interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
824 Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
825
826
827 ESCRIPT_DLL_API
828 Data
829 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
830 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
831
832 ESCRIPT_DLL_API
833 Data
834 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
835 double undef,bool check_boundaries);
836
837 ESCRIPT_DLL_API
838 Data
839 nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
840
841 ESCRIPT_DLL_API
842 Data
843 nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
844
845 /**
846 \brief
847 Calculates the gradient of the data at the data points of functionspace.
848 If functionspace is not present the function space of Function(getDomain()) is used.
849 *
850 */
851 ESCRIPT_DLL_API
852 Data
853 gradOn(const FunctionSpace& functionspace) const;
854
855 ESCRIPT_DLL_API
856 Data
857 grad() const;
858
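// Sketch: both forms compute the spatial gradient; grad() uses the default
// function space described above.
//
//   escript::Data g  = u.grad();              // on Function(u.getDomain())
//   escript::Data g2 = u.gradOn(fs2);         // on an explicitly chosen FunctionSpace
//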
859 /**
860 \brief
861 Calculate the integral over the function space domain as a python tuple.
862 */
863 ESCRIPT_DLL_API
864 boost::python::object
865 integrateToTuple_const() const;
866
867
868 /**
869 \brief
870 Calculate the integral over the function space domain as a python tuple.
871 */
872 ESCRIPT_DLL_API
873 boost::python::object
874 integrateToTuple();
875
876
877
878 /**
879 \brief
880 Returns 1./ Data object
881 *
882 */
883 ESCRIPT_DLL_API
884 Data
885 oneOver() const;
886 /**
887 \brief
888 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
889 *
890 */
891 ESCRIPT_DLL_API
892 Data
893 wherePositive() const;
894
895 /**
896 \brief
897 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
898 *
899 */
900 ESCRIPT_DLL_API
901 Data
902 whereNegative() const;
903
904 /**
905 \brief
906 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
907 *
908 */
909 ESCRIPT_DLL_API
910 Data
911 whereNonNegative() const;
912
913 /**
914 \brief
915 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
916 *
917 */
918 ESCRIPT_DLL_API
919 Data
920 whereNonPositive() const;
921
922 /**
923 \brief
924 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
925 *
926 */
927 ESCRIPT_DLL_API
928 Data
929 whereZero(double tol=0.0) const;
930
931 /**
932 \brief
933 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
934 *
935 */
936 ESCRIPT_DLL_API
937 Data
938 whereNonZero(double tol=0.0) const;
939
940 /**
941 \brief
942 Return the maximum absolute value of this Data object.
943
944 The method is not const because lazy data needs to be expanded before Lsup can be computed.
945 The _const form can be used when the Data object is const, however this will only work for
946 Data which is not Lazy.
947
948 For Data which contain no samples (or tagged Data for which no tags in use have a value)
949 zero is returned.
950 */
951 ESCRIPT_DLL_API
952 double
953 Lsup();
954
955 ESCRIPT_DLL_API
956 double
957 Lsup_const() const;
958
959
960 /**
961 \brief
962 Return the maximum value of this Data object.
963
964 The method is not const because lazy data needs to be expanded before sup can be computed.
965 The _const form can be used when the Data object is const, however this will only work for
966 Data which is not Lazy.
967
968 For Data which contain no samples (or tagged Data for which no tags in use have a value)
969 a large negative value is returned.
970 */
971 ESCRIPT_DLL_API
972 double
973 sup();
974
975 ESCRIPT_DLL_API
976 double
977 sup_const() const;
978
979
980 /**
981 \brief
982 Return the minimum value of this Data object.
983
984 The method is not const because lazy data needs to be expanded before inf can be computed.
985 The _const form can be used when the Data object is const, however this will only work for
986 Data which is not Lazy.
987
988 For Data which contain no samples (or tagged Data for which no tags in use have a value)
989 a large positive value is returned.
990 */
991 ESCRIPT_DLL_API
992 double
993 inf();
994
995 ESCRIPT_DLL_API
996 double
997 inf_const() const;
998
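// Sketch: scalar reductions over every element of every data point.
//
//   double hi   = u.sup();    // largest value
//   double lo   = u.inf();    // smallest value
//   double amax = u.Lsup();   // largest absolute value
//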
999
1000
1001 /**
1002 \brief
1003 Return the absolute value of each data point of this Data object.
1004 *
1005 */
1006 ESCRIPT_DLL_API
1007 Data
1008 abs() const;
1009
1010 /**
1011 \brief
1012 Return the maximum value of each data point of this Data object.
1013 *
1014 */
1015 ESCRIPT_DLL_API
1016 Data
1017 maxval() const;
1018
1019 /**
1020 \brief
1021 Return the minimum value of each data point of this Data object.
1022 *
1023 */
1024 ESCRIPT_DLL_API
1025 Data
1026 minval() const;
1027
1028 /**
1029 \brief
1030 Return the (sample number, data-point number) of the data point with
1031 the minimum component value in this Data object.
1032 \note If you are working in python, please consider using Locator
1033 instead of manually manipulating process and point IDs.
1034 */
1035 ESCRIPT_DLL_API
1036 const boost::python::tuple
1037 minGlobalDataPoint() const;
1038
1039 /**
1040 \brief
1041 Return the (sample number, data-point number) of the data point with
1042 the maximum component value in this Data object.
1043 \note If you are working in python, please consider using Locator
1044 instead of manually manipulating process and point IDs.
1045 */
1046 ESCRIPT_DLL_API
1047 const boost::python::tuple
1048 maxGlobalDataPoint() const;
1049
1050
1051
1052 /**
1053 \brief
1054 Return the sign of each data point of this Data object.
1055 -1 for negative values, zero for zero values, 1 for positive values.
1056 *
1057 */
1058 ESCRIPT_DLL_API
1059 Data
1060 sign() const;
1061
1062 /**
1063 \brief
1064 Return the symmetric part of a matrix which is half the matrix plus its transpose.
1065 *
1066 */
1067 ESCRIPT_DLL_API
1068 Data
1069 symmetric() const;
1070
1071 /**
1072 \brief
1073 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
1074 *
1075 */
1076 ESCRIPT_DLL_API
1077 Data
1078 nonsymmetric() const;
1079
1080 /**
1081 \brief
1082 Return the trace of a matrix
1083 *
1084 */
1085 ESCRIPT_DLL_API
1086 Data
1087 trace(int axis_offset) const;
1088
1089 /**
1090 \brief
1091 Transpose each data point of this Data object around the given axis.
1092 *
1093 */
1094 ESCRIPT_DLL_API
1095 Data
1096 transpose(int axis_offset) const;
1097
1098 /**
1099 \brief
1100 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1101 Currently this function is restricted to rank 2, square shape, and dimension 3.
1102 *
1103 */
1104 ESCRIPT_DLL_API
1105 Data
1106 eigenvalues() const;
1107
1108 /**
1109 \brief
1110 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1111 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1112 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1113 first non-zero entry is positive.
1114 Currently this function is restricted to rank 2, square shape, and dimension 3.
1115 *
1116 */
1117 ESCRIPT_DLL_API
1118 const boost::python::tuple
1119 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1120
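// Sketch (the returned tuple presumably packs eigenvalues first and eigenvectors
// second, matching the method name):
//
//   const boost::python::tuple vv = d.eigenvalues_and_eigenvectors(1.e-10);
//   // vv[0]: eigenvalues, vv[1]: eigenvectors
//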
1121 /**
1122 \brief
1123 swaps the components axis0 and axis1
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 Data
1128 swapaxes(const int axis0, const int axis1) const;
1129
1130 /**
1131 \brief
1132 Return the error function erf of each data point of this Data object.
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 erf() const;
1138
1139 /**
1140 \brief
1141 Return the sin of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 sin() const;
1147
1148 /**
1149 \brief
1150 Return the cos of each data point of this Data object.
1151 *
1152 */
1153 ESCRIPT_DLL_API
1154 Data
1155 cos() const;
1156
1157 /**
1158 \brief
1159 Return the tan of each data point of this Data object.
1160 *
1161 */
1162 ESCRIPT_DLL_API
1163 Data
1164 tan() const;
1165
1166 /**
1167 \brief
1168 Return the asin of each data point of this Data object.
1169 *
1170 */
1171 ESCRIPT_DLL_API
1172 Data
1173 asin() const;
1174
1175 /**
1176 \brief
1177 Return the acos of each data point of this Data object.
1178 *
1179 */
1180 ESCRIPT_DLL_API
1181 Data
1182 acos() const;
1183
1184 /**
1185 \brief
1186 Return the atan of each data point of this Data object.
1187 *
1188 */
1189 ESCRIPT_DLL_API
1190 Data
1191 atan() const;
1192
1193 /**
1194 \brief
1195 Return the sinh of each data point of this Data object.
1196 *
1197 */
1198 ESCRIPT_DLL_API
1199 Data
1200 sinh() const;
1201
1202 /**
1203 \brief
1204 Return the cosh of each data point of this Data object.
1205 *
1206 */
1207 ESCRIPT_DLL_API
1208 Data
1209 cosh() const;
1210
1211 /**
1212 \brief
1213 Return the tanh of each data point of this Data object.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 tanh() const;
1219
1220 /**
1221 \brief
1222 Return the asinh of each data point of this Data object.
1223 *
1224 */
1225 ESCRIPT_DLL_API
1226 Data
1227 asinh() const;
1228
1229 /**
1230 \brief
1231 Return the acosh of each data point of this Data object.
1232 *
1233 */
1234 ESCRIPT_DLL_API
1235 Data
1236 acosh() const;
1237
1238 /**
1239 \brief
1240 Return the atanh of each data point of this Data object.
1241 *
1242 */
1243 ESCRIPT_DLL_API
1244 Data
1245 atanh() const;
1246
1247 /**
1248 \brief
1249 Return the log to base 10 of each data point of this Data object.
1250 *
1251 */
1252 ESCRIPT_DLL_API
1253 Data
1254 log10() const;
1255
1256 /**
1257 \brief
1258 Return the natural log of each data point of this Data object.
1259 *
1260 */
1261 ESCRIPT_DLL_API
1262 Data
1263 log() const;
1264
1265 /**
1266 \brief
1267 Return the exponential function of each data point of this Data object.
1268 *
1269 */
1270 ESCRIPT_DLL_API
1271 Data
1272 exp() const;
1273
1274 /**
1275 \brief
1276 Return the square root of each data point of this Data object.
1277 *
1278 */
1279 ESCRIPT_DLL_API
1280 Data
1281 sqrt() const;
1282
1283 /**
1284 \brief
1285 Return the negation of each data point of this Data object.
1286 *
1287 */
1288 ESCRIPT_DLL_API
1289 Data
1290 neg() const;
1291
1292 /**
1293 \brief
1294 Return the identity of each data point of this Data object.
1295 Simply returns this object unmodified.
1296 *
1297 */
1298 ESCRIPT_DLL_API
1299 Data
1300 pos() const;
1301
1302 /**
1303 \brief
1304 Return the given power of each data point of this Data object.
1305
1306 \param right Input - the power to raise the object to.
1307 *
1308 */
1309 ESCRIPT_DLL_API
1310 Data
1311 powD(const Data& right) const;
1312
1313 /**
1314 \brief
1315 Return each data point of this Data object raised to the given power.
1316
1317 \param right Input - the power to raise the object to.
1318 *
1319 */
1320 ESCRIPT_DLL_API
1321 Data
1322 powO(const boost::python::object& right) const;
1323
1324 /**
1325 \brief
1326 Return the given boost python object raised to the power of each data point of this Data object.
1327
1328 \param left Input - the bases
1329 *
1330 */
1331
1332 ESCRIPT_DLL_API
1333 Data
1334 rpowO(const boost::python::object& left) const;
1335
1336 /**
1337 \brief
1338 Overloaded operator +=
1339 \param right - Input - The right hand side.
1340 *
1341 */
1342 ESCRIPT_DLL_API
1343 Data& operator+=(const Data& right);
1344 ESCRIPT_DLL_API
1345 Data& operator+=(const boost::python::object& right);
1346
1347 ESCRIPT_DLL_API
1348 Data& operator=(const Data& other);
1349
1350 /**
1351 \brief
1352 Overloaded operator -=
1353 \param right - Input - The right hand side.
1354 *
1355 */
1356 ESCRIPT_DLL_API
1357 Data& operator-=(const Data& right);
1358 ESCRIPT_DLL_API
1359 Data& operator-=(const boost::python::object& right);
1360
1361 /**
1362 \brief
1363 Overloaded operator *=
1364 \param right - Input - The right hand side.
1365 *
1366 */
1367 ESCRIPT_DLL_API
1368 Data& operator*=(const Data& right);
1369 ESCRIPT_DLL_API
1370 Data& operator*=(const boost::python::object& right);
1371
1372 /**
1373 \brief
1374 Overloaded operator /=
1375 \param right - Input - The right hand side.
1376 *
1377 */
1378 ESCRIPT_DLL_API
1379 Data& operator/=(const Data& right);
1380 ESCRIPT_DLL_API
1381 Data& operator/=(const boost::python::object& right);
1382
1383 /**
1384 \brief
1385 Newer style division operator for python
1386 */
1387 ESCRIPT_DLL_API
1388 Data truedivD(const Data& right);
1389
1390 /**
1391 \brief
1392 Newer style division operator for python
1393 */
1394 ESCRIPT_DLL_API
1395 Data truedivO(const boost::python::object& right);
1396
1397 /**
1398 \brief
1399 Newer style division operator for python
1400 */
1401 ESCRIPT_DLL_API
1402 Data rtruedivO(const boost::python::object& left);
1403
1404 /**
1405 \brief
1406 wrapper for python add operation
1407 */
1408 ESCRIPT_DLL_API
1409 boost::python::object __add__(const boost::python::object& right);
1410
1411
1412 /**
1413 \brief
1414 wrapper for python subtract operation
1415 */
1416 ESCRIPT_DLL_API
1417 boost::python::object __sub__(const boost::python::object& right);
1418
1419 /**
1420 \brief
1421 wrapper for python reverse subtract operation
1422 */
1423 ESCRIPT_DLL_API
1424 boost::python::object __rsub__(const boost::python::object& right);
1425
1426 /**
1427 \brief
1428 wrapper for python multiply operation
1429 */
1430 ESCRIPT_DLL_API
1431 boost::python::object __mul__(const boost::python::object& right);
1432
1433 /**
1434 \brief
1435 wrapper for python divide operation
1436 */
1437 ESCRIPT_DLL_API
1438 boost::python::object __div__(const boost::python::object& right);
1439
1440 /**
1441 \brief
1442 wrapper for python reverse divide operation
1443 */
1444 ESCRIPT_DLL_API
1445 boost::python::object __rdiv__(const boost::python::object& right);
1446
1447 /**
1448 \brief return the inverse of the matrices.
1449 */
1450 ESCRIPT_DLL_API
1451 Data
1452 matrixInverse() const;
1453
1454 /**
1455 \brief
1456 Returns true if this can be interpolated to functionspace.
1457 */
1458 ESCRIPT_DLL_API
1459 bool
1460 probeInterpolation(const FunctionSpace& functionspace) const;
1461
1462 /**
1463 Data object slicing methods.
1464 */
1465
1466 /**
1467 \brief
1468 Returns a slice from this Data object.
1469
1470 Description:
1471 Implements the [] get operator in python.
1472 Calls getSlice.
1473
1474 \param key - Input - python slice tuple specifying
1475 slice to return.
1476 */
1477 ESCRIPT_DLL_API
1478 Data
1479 getItem(const boost::python::object& key) const;
1480
1481 /**
1482 \brief
1483 Copies slice from value into this Data object.
1484
1485 Implements the [] set operator in python.
1486 Calls setSlice.
1487
1488 \param key - Input - python slice tuple specifying
1489 slice to copy from value.
1490 \param value - Input - Data object to copy from.
1491 */
1492 ESCRIPT_DLL_API
1493 void
1494 setItemD(const boost::python::object& key,
1495 const Data& value);
1496
1497 ESCRIPT_DLL_API
1498 void
1499 setItemO(const boost::python::object& key,
1500 const boost::python::object& value);
1501
1502 // The following public methods should be treated as private.
1503
1504 /**
1505 \brief
1506 Perform the given unary operation on every element of every data point in
1507 this Data object.
1508 */
1509 template <class UnaryFunction>
1510 ESCRIPT_DLL_API
1511 inline
1512 void
1513 unaryOp2(UnaryFunction operation);
1514
1515 /**
1516 \brief
1517 Return a Data object containing the specified slice of
1518 this Data object.
1519 \param region - Input - Region to copy.
1520 *
1521 */
1522 ESCRIPT_DLL_API
1523 Data
1524 getSlice(const DataTypes::RegionType& region) const;
1525
1526 /**
1527 \brief
1528 Copy the specified slice from the given value into this
1529 Data object.
1530 \param value - Input - Data to copy from.
1531 \param region - Input - Region to copy.
1532 *
1533 */
1534 ESCRIPT_DLL_API
1535 void
1536 setSlice(const Data& value,
1537 const DataTypes::RegionType& region);
1538
1539 /**
1540 \brief
1541 print the data values to stdout. Used for debugging
1542 */
1543 ESCRIPT_DLL_API
1544 void
1545 print(void);
1546
1547 /**
1548 \brief
1549 return the MPI rank number of the local data
1550 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1551 is returned
1552 */
1553 ESCRIPT_DLL_API
1554 int
1555 get_MPIRank(void) const;
1556
1557 /**
1558 \brief
1559 return the MPI size (number of processes) of the local data
1560 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1561 is returned
1562 */
1563 ESCRIPT_DLL_API
1564 int
1565 get_MPISize(void) const;
1566
1567 /**
1568 \brief
1569 return the MPI communicator of the local data
1570 MPI_COMM_WORLD is assumed and returned.
1571 */
1572 ESCRIPT_DLL_API
1573 MPI_Comm
1574 get_MPIComm(void) const;
1575
1576 /**
1577 \brief
1578 return the object produced by the factory, which is a DataConstant or DataExpanded
1579 TODO Ownership of this object should be explained in doco.
1580 */
1581 ESCRIPT_DLL_API
1582 DataAbstract*
1583 borrowData(void) const;
1584
1585 ESCRIPT_DLL_API
1586 DataAbstract_ptr
1587 borrowDataPtr(void) const;
1588
1589 ESCRIPT_DLL_API
1590 DataReady_ptr
1591 borrowReadyPtr(void) const;
1592
1593
1594
1595 /**
1596 \brief
1597 Return a reference to the beginning of the datapoint at the specified offset.
1598 TODO Eventually these should be inlined.
1599 \param i - position(offset) in the underlying datastructure
1600 */
1601
1602 ESCRIPT_DLL_API
1603 DataTypes::ValueType::const_reference
1604 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1605
1606
1607 ESCRIPT_DLL_API
1608 DataTypes::ValueType::reference
1609 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1610
1611 /**
1612 \brief Ensures that the Data is expanded and returns its underlying vector.
1613 Does not check for exclusive write, so do that before calling if sharing
1614 is a possibility.
1615 \warning For domain implementors only. Using this function will
1616 avoid using optimisations like lazy evaluation. It is intended
1617 to allow quick initialisation of Data by domain; not as a bypass around
1618 escript's other mechanisms.
1619 */
1620 ESCRIPT_DLL_API
1621 DataTypes::ValueType&
1622 getExpandedVectorReference();
1623
1624
1625 /**
1626 * \brief For tagged Data returns the number of tags with values.
1627 * For non-tagged Data this will return 0 (even Data which has been expanded from tagged).
1628 */
1629 ESCRIPT_DLL_API
1630 size_t
1631 getNumberOfTaggedValues() const;
1632
1633 protected:
1634
1635 private:
1636
1637 template <class BinaryOp>
1638 double
1639 #ifdef ESYS_MPI
1640 lazyAlgWorker(double init, MPI_Op mpiop_type);
1641 #else
1642 lazyAlgWorker(double init);
1643 #endif
1644
1645 double
1646 LsupWorker() const;
1647
1648 double
1649 supWorker() const;
1650
1651 double
1652 infWorker() const;
1653
1654 boost::python::object
1655 integrateWorker() const;
1656
1657 void
1658 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1659
1660 void
1661 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1662
1663 // For internal use in Data.cpp only!
1664 // other uses should call the main entry points and allow laziness
1665 Data
1666 minval_nonlazy() const;
1667
1668 // For internal use in Data.cpp only!
1669 Data
1670 maxval_nonlazy() const;
1671
1672
1673 /**
1674 \brief
1675 Check *this and the right operand are compatible. Throws
1676 an exception if they aren't.
1677 \param right - Input - The right hand side.
1678 */
1679 inline
1680 void
1681 operandCheck(const Data& right) const
1682 {
1683 return m_data->operandCheck(*(right.m_data.get()));
1684 }
1685
1686 /**
1687 \brief
1688 Perform the specified reduction algorithm on every element of every data point in
1689 this Data object according to the given function and return the single value result.
1690 */
1691 template <class BinaryFunction>
1692 inline
1693 double
1694 algorithm(BinaryFunction operation,
1695 double initial_value) const;
1696
1697 /**
1698 \brief
1699 Reduce each data-point in this Data object using the given operation. Return a Data
1700 object with the same number of data-points, but with each data-point containing only
1701 one value - the result of the reduction operation on the corresponding data-point in
1702 this Data object
1703 */
1704 template <class BinaryFunction>
1705 inline
1706 Data
1707 dp_algorithm(BinaryFunction operation,
1708 double initial_value) const;
1709
1710 /**
1711 \brief
1712 Perform the given binary operation on all of the data's elements.
1713 The underlying type of the right hand side (right) determines the final
1714 type of *this after the operation. For example if the right hand side
1715 is expanded *this will be expanded if necessary.
1716 RHS is a Data object.
1717 */
1718 template <class BinaryFunction>
1719 inline
1720 void
1721 binaryOp(const Data& right,
1722 BinaryFunction operation);
1723
1724 /**
1725 \brief
1726 Convert the data type of the RHS to match this.
1727 \param right - Input - data type to match.
1728 */
1729 void
1730 typeMatchLeft(Data& right) const;
1731
1732 /**
1733 \brief
1734 Convert the data type of this to match the RHS.
1735 \param right - Input - data type to match.
1736 */
1737 void
1738 typeMatchRight(const Data& right);
1739
1740 /**
1741 \brief
1742 Construct a Data object of the appropriate type.
1743 */
1744
1745 void
1746 initialise(const DataTypes::ValueType& value,
1747 const DataTypes::ShapeType& shape,
1748 const FunctionSpace& what,
1749 bool expanded);
1750
1751 void
1752 initialise(const WrappedArray& value,
1753 const FunctionSpace& what,
1754 bool expanded);
1755
1756 void
1757 initialise(const double value,
1758 const DataTypes::ShapeType& shape,
1759 const FunctionSpace& what,
1760 bool expanded);
1761
1762 //
1763 // flag to protect the data object against any update
1764 bool m_protected;
1765 mutable bool m_shared;
1766 bool m_lazy;
1767
1768 //
1769 // pointer to the actual data object
1770 // boost::shared_ptr<DataAbstract> m_data;
1771 DataAbstract_ptr m_data;
1772
1773 // If possible please use getReadyPtr instead.
1774 // But see warning below.
1775 const DataReady*
1776 getReady() const;
1777
1778 DataReady*
1779 getReady();
1780
1781
1782 // Be wary of using this for local operations since it (temporarily) increases reference count.
1783 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1784 // getReady() instead
1785 DataReady_ptr
1786 getReadyPtr();
1787
1788 const_DataReady_ptr
1789 getReadyPtr() const;
1790
1791
1792 /**
1793 \brief Update the Data's shared flag
1794 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1795 For internal use only.
1796 */
1797 void updateShareStatus(bool nowshared) const
1798 {
1799 m_shared=nowshared; // m_shared is mutable
1800 }
1801
1802 // In the isShared() method below:
1803 // A problem would occur if m_data (the address pointed to) were being modified
1804 // while the call m_data->is_shared is being executed.
1805 //
1806 // Q: So why do I think this code can be thread safe/correct?
1807 // A: We need to make some assumptions.
1808 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1809 // 2. We assume that no constructions or assignments which will share previously unshared
1810 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1811 //
1812 // This means that the only transition we need to consider, is when a previously shared object is
1813 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1814 // In those cases the m_shared flag changes to false after m_data has completed changing.
1815 // For any threads executing before the flag switches they will assume the object is still shared.
1816 bool isShared() const
1817 {
1818 return m_shared;
1819 /* if (m_shared) return true;
1820 if (m_data->isShared())
1821 {
1822 updateShareStatus(true);
1823 return true;
1824 }
1825 return false;*/
1826 }
1827
1828 void forceResolve()
1829 {
1830 if (isLazy())
1831 {
1832 #ifdef _OPENMP
1833 if (omp_in_parallel())
1834 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1835 throw DataException("Please do not call forceResolve() in a parallel region.");
1836 }
1837 #endif
1838 resolve();
1839 }
1840 }
1841
1842 /**
1843 \brief if another object is sharing our member data, make a copy to work with instead.
1844 This code should only be called from single threaded sections of code.
1845 */
1846 void exclusiveWrite()
1847 {
1848 #ifdef _OPENMP
1849 if (omp_in_parallel())
1850 {
1851 // *((int*)0)=17;
1852 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1853 }
1854 #endif
1855 forceResolve();
1856 if (isShared())
1857 {
1858 DataAbstract* t=m_data->deepCopy();
1859 set_m_data(DataAbstract_ptr(t));
1860 }
1861 }
1862
1863 /**
1864 \brief checks if caller can have exclusive write to the object
1865 */
1866 void checkExclusiveWrite()
1867 {
1868 if (isLazy() || isShared())
1869 {
1870 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1871 }
1872 }
1873
1874 /**
1875 \brief Modify the data abstract hosted by this Data object
1876 For internal use only.
1877 Passing a pointer to null is permitted (do this in the destructor)
1878 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1879 */
1880 void set_m_data(DataAbstract_ptr p);
1881
1882 friend class DataAbstract; // To allow calls to updateShareStatus
1883 friend class TestDomain; // so its getX will work quickly
1884 #ifdef IKNOWWHATIMDOING
1885 friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1886 #endif
1887 friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1888 friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed, const boost::python::tuple& filter);
1889
1890 };
1891
1892
1893 #ifdef IKNOWWHATIMDOING
1894 ESCRIPT_DLL_API
1895 Data
1896 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1897 #endif
1898
1899
1900 ESCRIPT_DLL_API
1901 Data
1902 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1903
1904
1905
1906 /**
1907 \brief Create a new Expanded Data object filled with pseudo-random data.
1908 */
1909 ESCRIPT_DLL_API
1910 Data randomData(const boost::python::tuple& shape,
1911 const FunctionSpace& what,
1912 long seed, const boost::python::tuple& filter);
1913
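// Sketch (illustrative; the exact semantics of `filter` are domain-dependent and an
// empty tuple is assumed here to mean "no filter"):
//
//   escript::Data r = escript::randomData(boost::python::make_tuple(3, 3), fs,
//                                         1234 /*seed*/, boost::python::tuple());
//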
1914
1915 } // end namespace escript
1916
1917
1918 // No, this is not supposed to be at the top of the file
1919 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1920 // so that I can dynamic cast between them below.
1921 #include "DataReady.h"
1922 #include "DataLazy.h"
1923
1924 namespace escript
1925 {
1926
1927 inline
1928 const DataReady*
1929 Data::getReady() const
1930 {
1931 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1932 EsysAssert((dr!=0), "Error - casting to DataReady.");
1933 return dr;
1934 }
1935
1936 inline
1937 DataReady*
1938 Data::getReady()
1939 {
1940 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1941 EsysAssert((dr!=0), "Error - casting to DataReady.");
1942 return dr;
1943 }
1944
1945 // Be wary of using this for local operations since it (temporarily) increases reference count.
1946 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1947 // getReady() instead
1948 inline
1949 DataReady_ptr
1950 Data::getReadyPtr()
1951 {
1952 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1953 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1954 return dr;
1955 }
1956
1957
1958 inline
1959 const_DataReady_ptr
1960 Data::getReadyPtr() const
1961 {
1962 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1963 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1964 return dr;
1965 }
1966
1967 inline
1968 DataAbstract::ValueType::value_type*
1969 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1970 {
1971 if (isLazy())
1972 {
1973 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1974 }
1975 return getReady()->getSampleDataRW(sampleNo);
1976 }
1977
1978 inline
1979 const DataAbstract::ValueType::value_type*
1980 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const
1981 {
1982 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1983 if (l!=0)
1984 {
1985 size_t offset=0;
1986 const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
1987 return &((*res)[offset]);
1988 }
1989 return getReady()->getSampleDataRO(sampleNo);
1990 }
1991
1992 inline
1993 const DataAbstract::ValueType::value_type*
1994 Data::getDataRO() const
1995 {
1996 if (isLazy())
1997 {
1998 throw DataException("Programmer error - getDataRO must not be called on Lazy Data.");
1999 }
2000 if (getNumSamples()==0)
2001 {
2002 return 0;
2003 }
2004 else
2005 {
2006 return &(getReady()->getVectorRO()[0]);
2007 }
2008 }
2009
2010
2011 /**
2012 Binary Data object operators.
2013 */
2014 inline double rpow(double x,double y)
2015 {
2016 return pow(y,x);
2017 }
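// Note the swapped arguments: rpow(x, y) computes pow(y, x), e.g. rpow(2.0, 10.0) == 100.0,
// which is presumably what the reversed power operation (rpowO) relies on.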
2018
2019 /**
2020 \brief
2021 Operator+
2022 Takes two Data objects.
2023 */
2024 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
2025
2026 /**
2027 \brief
2028 Operator-
2029 Takes two Data objects.
2030 */
2031 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
2032
2033 /**
2034 \brief
2035 Operator*
2036 Takes two Data objects.
2037 */
2038 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
2039
2040 /**
2041 \brief
2042 Operator/
2043 Takes two Data objects.
2044 */
2045 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2046
2047 /**
2048 \brief
2049 Operator+
2050 Takes LHS Data object and RHS python::object.
2051 python::object must be convertible to Data type.
2052 */
2053 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2054
2055 /**
2056 \brief
2057 Operator-
2058 Takes LHS Data object and RHS python::object.
2059 python::object must be convertible to Data type.
2060 */
2061 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2062
2063 /**
2064 \brief
2065 Operator*
2066 Takes LHS Data object and RHS python::object.
2067 python::object must be convertible to Data type.
2068 */
2069 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2070
2071 /**
2072 \brief
2073 Operator/
2074 Takes LHS Data object and RHS python::object.
2075 python::object must be convertible to Data type.
2076 */
2077 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2078
2079 /**
2080 \brief
2081 Operator+
2082 Takes LHS python::object and RHS Data object.
2083 python::object must be convertible to Data type.
2084 */
2085 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2086
2087 /**
2088 \brief
2089 Operator-
2090 Takes LHS python::object and RHS Data object.
2091 python::object must be convertible to Data type.
2092 */
2093 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2094
2095 /**
2096 \brief
2097 Operator*
2098 Takes LHS python::object and RHS Data object.
2099 python::object must be convertible to Data type.
2100 */
2101 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2102
2103 /**
2104 \brief
2105 Operator/
2106 Takes LHS python::object and RHS Data object.
2107 python::object must be convertible to Data type.
2108 */
2109 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2110
2111
2112
2113 /**
2114 \brief
2115 Output operator
2116 */
2117 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2118
2119 /**
2120 \brief
2121 Compute a tensor product of two Data objects
2122 \param arg_0 - Input - Data object
2123 \param arg_1 - Input - Data object
2124 \param axis_offset - Input - axis offset
2125 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
2126 */
2127 ESCRIPT_DLL_API
2128 Data
2129 C_GeneralTensorProduct(Data& arg_0,
2130 Data& arg_1,
2131 int axis_offset=0,
2132 int transpose=0);
2133
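// Sketch (a and b are hypothetical Data objects with compatible shapes):
//
//   escript::Data c = escript::C_GeneralTensorProduct(a, b, /*axis_offset*/1, /*transpose*/0);
//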
2134 /**
2135 \brief
2136 Operator/
2137 Takes RHS Data object.
2138 */
2139 inline
2140 Data
2141 Data::truedivD(const Data& right)
2142 {
2143 return *this / right;
2144 }
2145
2146 /**
2147 \brief
2148 Operator/
2149 Takes RHS python::object.
2150 */
2151 inline
2152 Data
2153 Data::truedivO(const boost::python::object& right)
2154 {
2155 Data tmp(right, getFunctionSpace(), false);
2156 return truedivD(tmp);
2157 }
2158
2159 /**
2160 \brief
2161 Operator/
2162 Takes LHS python::object.
2163 */
2164 inline
2165 Data
2166 Data::rtruedivO(const boost::python::object& left)
2167 {
2168 Data tmp(left, getFunctionSpace(), false);
2169 return tmp.truedivD(*this);
2170 }
2171
2172 /**
2173 \brief
2174 Perform the given binary operation with this and right as operands.
2175 Right is a Data object.
2176 */
2177 template <class BinaryFunction>
2178 inline
2179 void
2180 Data::binaryOp(const Data& right,
2181 BinaryFunction operation)
2182 {
2183 //
2184 // updating a rank-zero object with an object of higher rank is not supported
2185 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2186 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2187 }
2188
2189 if (isLazy() || right.isLazy())
2190 {
2191 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2192 }
2193 //
2194 // initially make the temporary a shallow copy
2195 Data tempRight(right);
2196 FunctionSpace fsl=getFunctionSpace();
2197 FunctionSpace fsr=right.getFunctionSpace();
2198 if (fsl!=fsr) {
2199 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2200 if (intres==0)
2201 {
2202 std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
2203 msg+=fsl.toString();
2204 msg+=" ";
2205 msg+=fsr.toString();
2206 throw DataException(msg.c_str());
2207 }
2208 else if (intres==1)
2209 {
2210 // an interpolation is required so create a new Data
2211 tempRight=Data(right,fsl);
2212 }
2213 else // reverse interpolation preferred
2214 {
2215 // interpolate onto the RHS function space
2216 Data tempLeft(*this,fsr);
2217 set_m_data(tempLeft.m_data);
2218 }
2219 }
2220 operandCheck(tempRight);
2221 //
2222 // ensure this has the right type for the RHS
2223 typeMatchRight(tempRight);
2224 //
2225 // Need to cast to the concrete types so that the correct binaryOp
2226 // is called.
2227 if (isExpanded()) {
2228 //
2229 // Expanded data will be done in parallel, the right hand side can be
2230 // of any data type
2231 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2232 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2233 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2234 } else if (isTagged()) {
2235 //
2236 // Tagged data is operated on serially, the right hand side can be
2237 // either DataConstant or DataTagged
2238 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2239 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2240 if (right.isTagged()) {
2241 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2242 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2243 escript::binaryOp(*leftC,*rightC,operation);
2244 } else {
2245 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2246 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2247 escript::binaryOp(*leftC,*rightC,operation);
2248 }
2249 } else if (isConstant()) {
2250 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2251 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2252 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2253 escript::binaryOp(*leftC,*rightC,operation);
2254 }
2255 }
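
/*
   A minimal sketch, assuming binaryOp is accessible from the calling context
   (it may be non-public, with the arithmetic operators as the supported
   entry points):

       #include <functional>
       a.binaryOp(b, std::plus<double>());   // element-wise a = a + b

   The functor is applied after the function-space reconciliation and type
   matching shown above.
*/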
2256
2257 /**
2258 \brief
2259 Perform the given Data object reduction algorithm on this and return the result.
2260 The given operation combines every element of every data point, so the argument
2261 object (*this) is a rank n Data object while the returned value is a single scalar.
2262 Calls escript::algorithm.
2263 */
2264 template <class BinaryFunction>
2265 inline
2266 double
2267 Data::algorithm(BinaryFunction operation, double initial_value) const
2268 {
2269 if (isExpanded()) {
2270 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2271 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2272 return escript::algorithm(*leftC,operation,initial_value);
2273 } else if (isTagged()) {
2274 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2275 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2276 return escript::algorithm(*leftC,operation,initial_value);
2277 } else if (isConstant()) {
2278 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2279 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2280 return escript::algorithm(*leftC,operation,initial_value);
2281 } else if (isEmpty()) {
2282 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2283 } else if (isLazy()) {
2284 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2285 } else {
2286 throw DataException("Error - Data encapsulates an unknown type.");
2287 }
2288 }
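
/*
   A minimal sketch, assuming algorithm is accessible from the calling
   context: any functor with a double(double,double) call operator can drive
   the reduction.

       #include <limits>
       struct MaxOf {
           double operator()(double a, double b) const { return a > b ? a : b; }
       };
       double biggest = d.algorithm(MaxOf(), -std::numeric_limits<double>::max());

   initial_value seeds the reduction; here it is the lowest finite double, so
   any stored value replaces it.
*/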
2289
2290 /**
2291 \brief
2292 Perform the given data point reduction algorithm on data and return the result.
2293 The given operation combines the elements within each data point into a scalar,
2294 so the argument is a rank n Data object and the returned object is a
2295 rank 0 Data object.
2296 Calls escript::dp_algorithm.
2297 */
2298 template <class BinaryFunction>
2299 inline
2300 Data
2301 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2302 {
2303 if (isEmpty()) {
2304 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2305 }
2306 else if (isExpanded()) {
2307 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2308 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2309 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2310 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2311 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2312 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2313 return result;
2314 }
2315 else if (isTagged()) {
2316 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2317 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2318 DataTypes::ValueType defval(1);
2319 defval[0]=0;
2320 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2321 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2322 return Data(resultT); // note: the Data object now owns the resultT pointer
2323 }
2324 else if (isConstant()) {
2325 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2326 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2327 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2328 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2329 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2330 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2331 return result;
2332 } else if (isLazy()) {
2333 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2334 } else {
2335 throw DataException("Error - Data encapsulates an unknown type.");
2336 }
2337 }
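
/*
   A minimal sketch, assuming dp_algorithm is accessible from the calling
   context: each data point is reduced separately, so the sample structure is
   kept but every point becomes a scalar.

       struct SumOf {
           double operator()(double a, double b) const { return a + b; }
       };
       Data pointTotals = d.dp_algorithm(SumOf(), 0.0);  // rank-0 result per point

   Compare with algorithm() above, which collapses the whole object to one
   number.
*/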
2338
2339 /**
2340 \brief
2341 Compute an element-wise tensor operation combining two Data objects
2342 \param arg_0 - Input - Data object
2343 \param arg_1 - Input - Data object
2344 \param operation - Input - Binary op functor
2345 */
2346 template <typename BinaryFunction>
2347 inline
2348 Data
2349 C_TensorBinaryOperation(Data const &arg_0,
2350 Data const &arg_1,
2351 BinaryFunction operation)
2352 {
2353 if (arg_0.isEmpty() || arg_1.isEmpty())
2354 {
2355 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2356 }
2357 if (arg_0.isLazy() || arg_1.isLazy())
2358 {
2359 throw DataException("Error - Operations not permitted on lazy data.");
2360 }
2361 // Interpolate if necessary and find an appropriate function space
2362 Data arg_0_Z, arg_1_Z;
2363 FunctionSpace fsl=arg_0.getFunctionSpace();
2364 FunctionSpace fsr=arg_1.getFunctionSpace();
2365 if (fsl!=fsr) {
2366 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2367 if (intres==0)
2368 {
2369 std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
2370 msg+=fsl.toString();
2371 msg+=" ";
2372 msg+=fsr.toString();
2373 throw DataException(msg.c_str());
2374 }
2375 else if (intres==1)
2376 {
2377 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2378 arg_0_Z =Data(arg_0);
2379 }
2380 else // reverse interpolation preferred
2381 {
2382 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2383 arg_1_Z = Data(arg_1);
2384 }
2385 } else {
2386 arg_0_Z = Data(arg_0);
2387 arg_1_Z = Data(arg_1);
2388 }
2389 // Get rank and shape of inputs
2390 int rank0 = arg_0_Z.getDataPointRank();
2391 int rank1 = arg_1_Z.getDataPointRank();
2392 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2393 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2394 int size0 = arg_0_Z.getDataPointSize();
2395 int size1 = arg_1_Z.getDataPointSize();
2396 // Declare output Data object
2397 Data res;
2398
2399 if (shape0 == shape1) {
2400 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2401 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2402 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2403 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2404 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2405
2406 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2407 }
2408 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2409
2410 // Prepare the DataConstant input
2411 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2412
2413 // Borrow DataTagged input from Data object
2414 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2415
2416 // Prepare a DataTagged output 2
2417 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2418 res.tag();
2419 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2420
2421 // Prepare offset into DataConstant
2422 int offset_0 = tmp_0->getPointOffset(0,0);
2423 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2424
2425 // Get the pointers to the actual data
2426 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2427 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2428
2429 // Compute a result for the default
2430 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2431 // Compute a result for each tag
2432 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2433 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2434 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2435 tmp_2->addTag(i->first);
2436 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2437 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2438
2439 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2440 }
2441
2442 }
2443 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2444 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2445 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2446 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2447 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2448
2449 int sampleNo_1,dataPointNo_1;
2450 int numSamples_1 = arg_1_Z.getNumSamples();
2451 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2452 int offset_0 = tmp_0->getPointOffset(0,0);
2453 res.requireWrite();
2454 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2455 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2456 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2457 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2458 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2459 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2460 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2461 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2462 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2463 }
2464 }
2465
2466 }
2467 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2468 // Borrow DataTagged input from Data object
2469 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2470
2471 // Prepare the DataConstant input
2472 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2473
2474 // Prepare a DataTagged output 2
2475 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2476 res.tag();
2477 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2478
2479 // Prepare offset into DataConstant
2480 int offset_1 = tmp_1->getPointOffset(0,0);
2481
2482 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2483 // Get the pointers to the actual data
2484 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2485 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2486 // Compute a result for the default
2487 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2488 // Compute a result for each tag
2489 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2490 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2491 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2492 tmp_2->addTag(i->first);
2493 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2494 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2495 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2496 }
2497
2498 }
2499 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2500 // Borrow DataTagged input from Data object
2501 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2502
2503 // Borrow DataTagged input from Data object
2504 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2505
2506 // Prepare a DataTagged output 2
2507 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2508 res.tag(); // DataTagged output
2509 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2510
2511 // Get the pointers to the actual data
2512 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2513 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2514 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2515
2516 // Compute a result for the default
2517 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2518 // Merge the tags
2519 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2520 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2521 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2522 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2523 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2524 }
2525 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2526 tmp_2->addTag(i->first);
2527 }
2528 // Compute a result for each tag
2529 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2530 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2531
2532 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2533 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2534 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2535
2536 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2537 }
2538
2539 }
2540 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2541 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2542 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2543 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2544 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2545 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2546
2547 int sampleNo_0,dataPointNo_0;
2548 int numSamples_0 = arg_0_Z.getNumSamples();
2549 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2550 res.requireWrite();
2551 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2552 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2553 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2554 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2555 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2556 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2557 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2558 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2559 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2560 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2561 }
2562 }
2563
2564 }
2565 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2566 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2567 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2568 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2569 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2570
2571 int sampleNo_0,dataPointNo_0;
2572 int numSamples_0 = arg_0_Z.getNumSamples();
2573 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2574 int offset_1 = tmp_1->getPointOffset(0,0);
2575 res.requireWrite();
2576 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2577 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2578 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2579 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2580 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2581
2582 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2583 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2584 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2585
2586
2587 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2588 }
2589 }
2590
2591 }
2592 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2593 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2594 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2595 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2596 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2597 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2598
2599 int sampleNo_0,dataPointNo_0;
2600 int numSamples_0 = arg_0_Z.getNumSamples();
2601 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2602 res.requireWrite();
2603 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2604 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2605 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2606 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2607 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2608 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2609 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2610 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2611 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2612 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2613 }
2614 }
2615
2616 }
2617 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2618 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2619 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2620 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2621 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2622 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2623
2624 int sampleNo_0,dataPointNo_0;
2625 int numSamples_0 = arg_0_Z.getNumSamples();
2626 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2627 res.requireWrite();
2628 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2629 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2630 dataPointNo_0=0;
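          // Assuming the data points of one expanded sample are stored
          // contiguously, the whole sample is processed in a single call of
          // length size0*numDataPointsPerSample_0; the commented-out loop
          // below is the per-point equivalent.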
2631 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2632 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2633 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2634 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2635 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2636 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2637 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2638 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2639 // }
2640 }
2641
2642 }
2643 else {
2644 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2645 }
2646
2647 } else if (0 == rank0) {
2648 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2649 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2650 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2651 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2652 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2653 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2654 }
2655 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2656
2657 // Prepare the DataConstant input
2658 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2659
2660 // Borrow DataTagged input from Data object
2661 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2662
2663 // Prepare a DataTagged output 2
2664 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2665 res.tag();
2666 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2667
2668 // Prepare offset into DataConstant
2669 int offset_0 = tmp_0->getPointOffset(0,0);
2670 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2671
2672 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2673 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2674
2675 // Compute a result for the default
2676 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2677 // Compute a result for each tag
2678 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2679 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2680 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2681 tmp_2->addTag(i->first);
2682 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2683 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2684 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2685 }
2686
2687 }
2688 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2689
2690 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2691 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2692 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2693 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2694
2695 int sampleNo_1;
2696 int numSamples_1 = arg_1_Z.getNumSamples();
2697 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2698 int offset_0 = tmp_0->getPointOffset(0,0);
2699 const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2700 double ptr_0 = ptr_src[0]; // scalar value of the rank-0 argument (a value, not a pointer, despite the name)
2701 int size = size1*numDataPointsPerSample_1;
2702 res.requireWrite();
2703 #pragma omp parallel for private(sampleNo_1) schedule(static)
2704 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2705 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2706 int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2707 int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2708 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2709 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2710 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2711 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2712
2713 // }
2714 }
2715
2716 }
2717 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2718
2719 // Borrow DataTagged input from Data object
2720 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2721
2722 // Prepare the DataConstant input
2723 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2724
2725 // Prepare a DataTagged output 2
2726 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2727 res.tag();
2728 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2729
2730 // Prepare offset into DataConstant
2731 int offset_1 = tmp_1->getPointOffset(0,0);
2732 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2733
2734 // Get the pointers to the actual data
2735 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2736 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2737
2738
2739 // Compute a result for the default
2740 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2741 // Compute a result for each tag
2742 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2743 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2744 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2745 tmp_2->addTag(i->first);
2746 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2747 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2748
2749 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2750 }
2751
2752 }
2753 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2754
2755 // Borrow DataTagged input from Data object
2756 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2757
2758 // Borrow DataTagged input from Data object
2759 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2760
2761 // Prepare a DataTagged output 2
2762 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2763 res.tag(); // DataTagged output
2764 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2765
2766 // Get the pointers to the actual data
2767 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2768 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2769 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2770
2771 // Compute a result for the default
2772 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2773 // Merge the tags
2774 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2775 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2776 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2777 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2778 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2779 }
2780 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2781 tmp_2->addTag(i->first);
2782 }
2783 // Compute a result for each tag
2784 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2785 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2786 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2787 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2788 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2789
2790 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2791 }
2792
2793 }
2794 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2795
2796 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2797 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2798 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2799 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2800 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2801
2802 int sampleNo_0,dataPointNo_0;
2803 int numSamples_0 = arg_0_Z.getNumSamples();
2804 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2805 res.requireWrite();
2806 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2807 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2808 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2809 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2810 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2811 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2812 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2813 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2814 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2815 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2816 }
2817 }
2818
2819 }
2820 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2821 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2822 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2823 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2824 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2825
2826 int sampleNo_0,dataPointNo_0;
2827 int numSamples_0 = arg_0_Z.getNumSamples();
2828 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2829 int offset_1 = tmp_1->getPointOffset(0,0);
2830 res.requireWrite();
2831 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2832 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2833 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2834 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2835 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2836 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2837 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2838 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2839 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2840 }
2841 }
2842
2843
2844 }
2845 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2846
2847 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2848 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2849 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2850 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2851 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2852
2853 int sampleNo_0,dataPointNo_0;
2854 int numSamples_0 = arg_0_Z.getNumSamples();
2855 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2856 res.requireWrite();
2857 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2858 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2859 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2860 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2861 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2862 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2863 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2864 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2865 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2866 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2867 }
2868 }
2869
2870 }
2871 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2872
2873 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2874 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2875 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2876 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2877 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2878
2879 int sampleNo_0,dataPointNo_0;
2880 int numSamples_0 = arg_0_Z.getNumSamples();
2881 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2882 res.requireWrite();
2883 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2884 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2885 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2886 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2887 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2888 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2889 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2890 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2891 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2892 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2893 }
2894 }
2895
2896 }
2897 else {
2898 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2899 }
2900
2901 } else if (0 == rank1) {
2902 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2903 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2904 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2905 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2906 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2907 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2908 }
2909 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2910
2911 // Prepare the DataConstant input
2912 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2913
2914 // Borrow DataTagged input from Data object
2915 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2916
2917 // Prepare a DataTagged output 2
2918 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2919 res.tag();
2920 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2921
2922 // Prepare offset into DataConstant
2923 int offset_0 = tmp_0->getPointOffset(0,0);
2924 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2925
2926 //Get the pointers to the actual data
2927 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2928 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2929
2930 // Compute a result for the default
2931 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2932 // Compute a result for each tag
2933 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2934 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2935 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2936 tmp_2->addTag(i->first);
2937 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2938 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2939 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2940 }
2941 }
2942 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2943
2944 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2945 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2946 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2947 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2948
2949 int sampleNo_1,dataPointNo_1;
2950 int numSamples_1 = arg_1_Z.getNumSamples();
2951 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2952 int offset_0 = tmp_0->getPointOffset(0,0);
2953 res.requireWrite();
2954 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2955 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2956 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2957 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2958 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2959 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2960 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2961 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2962 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2963 }
2964 }
2965
2966 }
2967 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2968
2969 // Borrow DataTagged input from Data object
2970 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2971
2972 // Prepare the DataConstant input
2973 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2974
2975 // Prepare a DataTagged output 2
2976 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2977 res.tag();
2978 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2979
2980 // Prepare offset into DataConstant
2981 int offset_1 = tmp_1->getPointOffset(0,0);
2982 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2983 // Get the pointers to the actual data
2984 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2985 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2986 // Compute a result for the default
2987 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2988 // Compute a result for each tag
2989 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2990 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2991 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2992 tmp_2->addTag(i->first);
2993 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2994 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2995 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2996 }
2997
2998 }
2999 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
3000
3001 // Borrow DataTagged input from Data object
3002 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3003
3004 // Borrow DataTagged input from Data object
3005 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3006
3007 // Prepare a DataTagged output 2
3008 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
3009 res.tag(); // DataTagged output
3010 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3011
3012 // Get the pointers to the actual data
3013 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3014 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
3015 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3016
3017 // Compute a result for the default
3018 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3019 // Merge the tags
3020 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3021 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3022 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
3023 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3024 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
3025 }
3026 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
3027 tmp_2->addTag(i->first);
3028 }
3029 // Compute a result for each tag
3030 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
3031 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
3032 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3033 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
3034 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3035 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3036 }
3037
3038 }
3039 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
3040
3041 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3042 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3043 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3044 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3045 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3046
3047 int sampleNo_0,dataPointNo_0;
3048 int numSamples_0 = arg_0_Z.getNumSamples();
3049 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3050 res.requireWrite();
3051 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3052 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3053 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
3054 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3055 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3056 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3057 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3058 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3059 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3060 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3061 }
3062 }
3063
3064 }
3065 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
3066 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3067 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3068 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3069 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3070
3071 int sampleNo_0;
3072 int numSamples_0 = arg_0_Z.getNumSamples();
3073 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3074 int offset_1 = tmp_1->getPointOffset(0,0);
3075 const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3076 double ptr_1 = ptr_src[0]; // scalar value of the rank-0 argument (a value, not a pointer, despite the name)
3077 int size = size0 * numDataPointsPerSample_0;
3078 res.requireWrite();
3079 #pragma omp parallel for private(sampleNo_0) schedule(static)
3080 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3081 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3082 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3083 int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3084 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3085 // const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3086 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3087 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3088 // }
3089 }
3090
3091
3092 }
3093 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3094
3095 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3096 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3097 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3098 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3099 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3100
3101 int sampleNo_0,dataPointNo_0;
3102 int numSamples_0 = arg_0_Z.getNumSamples();
3103 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3104 res.requireWrite();
3105 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3106 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3107 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3108 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3109 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3110 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3111 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3112 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3113 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3114 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3115 }
3116 }
3117
3118 }
3119 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3120
3121 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3122 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3123 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3124 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3125 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3126
3127 int sampleNo_0,dataPointNo_0;
3128 int numSamples_0 = arg_0_Z.getNumSamples();
3129 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3130 res.requireWrite();
3131 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3132 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3133 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3134 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3135 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3136 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3137 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3138 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3139 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3140 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3141 }
3142 }
3143
3144 }
3145 else {
3146 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3147 }
3148
3149 } else {
3150 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3151 }
3152
3153 return res;
3154 }
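
/*
   A minimal usage sketch, assuming the placeholder Data objects a and b live
   on compatible function spaces:

       #include <functional>
       Data c = C_TensorBinaryOperation(a, b, std::plus<double>());  // c = a + b

   The storage type (constant, tagged or expanded) and function space of the
   result are chosen from the two arguments as in the branches above.
*/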
3155
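/**
   \brief
   Compute a tensor operation applied element-wise to a single Data object
   \param arg_0 - Input - Data object
   \param operation - Input - Unary op functor
*/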
3156 template <typename UnaryFunction>
3157 Data
3158 C_TensorUnaryOperation(Data const &arg_0,
3159 UnaryFunction operation)
3160 {
3161 if (arg_0.isEmpty()) // do this before we attempt to interpolate
3162 {
3163 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
3164 }
3165 if (arg_0.isLazy())
3166 {
3167 throw DataException("Error - Operations not permitted on lazy data.");
3168 }
3169 // Work on a (shallow) copy of the argument; a unary operation needs no interpolation
3170 Data arg_0_Z = Data(arg_0);
3171
3172 // Get rank and shape of inputs
3173 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3174 int size0 = arg_0_Z.getDataPointSize();
3175
3176 // Declare output Data object
3177 Data res;
3178
3179 if (arg_0_Z.isConstant()) {
3180 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3181 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3182 double *ptr_2 = &(res.getDataAtOffsetRW(0));
3183 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3184 }
3185 else if (arg_0_Z.isTagged()) {
3186
3187 // Borrow DataTagged input from Data object
3188 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3189
3190 // Prepare a DataTagged output 2
3191 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3192 res.tag();
3193 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3194
3195 // Get the pointers to the actual data
3196 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3197 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3198 // Compute a result for the default
3199 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3200 // Compute a result for each tag
3201 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3202 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3203 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3204 tmp_2->addTag(i->first);
3205 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3206 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3207 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3208 }
3209
3210 }
3211 else if (arg_0_Z.isExpanded()) {
3212
3213 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3214 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3215 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3216
3217 int sampleNo_0,dataPointNo_0;
3218 int numSamples_0 = arg_0_Z.getNumSamples();
3219 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3220 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3221 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3222 dataPointNo_0=0;
3223 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3224 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3225 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3226 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3227 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3228 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3229 // }
3230 }
3231 }
3232 else {
3233 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3234 }
3235
3236 return res;
3237 }
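
/*
   A minimal usage sketch with a placeholder functor:

       struct Negate {
           double operator()(double x) const { return -x; }
       };
       Data minusd = C_TensorUnaryOperation(d, Negate());

   Any function object with a double(double) call operator can be passed as
   the operation.
*/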
3238
3239 }
3240 #endif
