
Contents of /branches/split/escriptcore/src/Data.h



Revision 4763
Tue Mar 18 05:20:23 2014 UTC by jfenwick
File MIME type: text/plain
File size: 98369 bytes
Replaced references to getSampleDataRO(0) with getDataRO()
This was to deal with the case where processes with no data were trying to compute the offset of sample 0 when all that was really needed was a null pointer.

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2014 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development 2012-2013 by School of Earth Sciences
13 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 *
15 *****************************************************************************/
16
17
18 /** \file Data.h */
19
20 #ifndef DATA_H
21 #define DATA_H
22 #include "system_dep.h"
23
24 #include "DataTypes.h"
25 #include "DataAbstract.h"
26 #include "DataAlgorithm.h"
27 #include "FunctionSpace.h"
28 #include "BinaryOp.h"
29 #include "UnaryOp.h"
30 #include "DataException.h"
31
32
33
34 #include "DataC.h"
35
36 #ifdef _OPENMP
37 #include <omp.h>
38 #endif
39
40 #include "esysUtils/Esys_MPI.h"
41 #include <string>
42 #include <algorithm>
43 #include <sstream>
44
45 #include <boost/shared_ptr.hpp>
46 #include <boost/python/object.hpp>
47 #include <boost/python/tuple.hpp>
48
49 namespace escript {
50
51 //
52 // Forward declaration for various implementations of Data.
53 class DataConstant;
54 class DataTagged;
55 class DataExpanded;
56 class DataLazy;
57
58 /**
59 \brief
60 Data represents a collection of datapoints.
61
62 Description:
63 Internally, the datapoints are actually stored by a DataAbstract object.
64 The specific instance of DataAbstract used may vary over the lifetime
65 of the Data object.
66 Some methods on this class return references (e.g. getShape()).
67 These references should not be used after an operation which changes the underlying DataAbstract object.
68 Doing so will lead to invalid memory access.
69 This should not affect any methods exposed via boost::python.
70 */
71 class Data {
72
73 public:
74
75 // These typedefs allow function names to be cast to pointers
76 // to functions of the appropriate type when calling unaryOp etc.
77 typedef double (*UnaryDFunPtr)(double);
78 typedef double (*BinaryDFunPtr)(double,double);
79
80
81 /**
82 Constructors.
83 */
84
85 /**
86 \brief
87 Default constructor.
88 Creates a DataEmpty object.
89 */
90 ESCRIPT_DLL_API
91 Data();
92
93 /**
94 \brief
95 Copy constructor.
96 WARNING: Only performs a shallow copy.
97 */
98 ESCRIPT_DLL_API
99 Data(const Data& inData);
100
101 /**
102 \brief
103 Constructor from another Data object. If "what" differs from the
104 function space of inData, an attempt is made to interpolate inData onto "what";
105 otherwise a shallow copy of inData is returned.
106 */
107 ESCRIPT_DLL_API
108 Data(const Data& inData,
109 const FunctionSpace& what);
110
111 /**
112 \brief Copy Data from an existing vector
113 */
114
115 ESCRIPT_DLL_API
116 Data(const DataTypes::ValueType& value,
117 const DataTypes::ShapeType& shape,
118 const FunctionSpace& what=FunctionSpace(),
119 bool expanded=false);
120
121 /**
122 \brief
123 Constructor which creates a Data with points having the specified shape.
124
125 \param value - Input - Single value applied to all Data.
126 \param dataPointShape - Input - The shape of each data point.
127 \param what - Input - A description of what this data represents.
128 \param expanded - Input - Flag, if true fill the entire container with
129 the given value. Otherwise a more efficient storage
130 mechanism will be used.
131 */
132 ESCRIPT_DLL_API
133 Data(double value,
134 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
135 const FunctionSpace& what=FunctionSpace(),
136 bool expanded=false);
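/**
   Usage sketch (editorial addition, not part of the original source). It shows the
   constant vs. expanded forms of the value constructor above; "fs" stands for an
   assumed, previously constructed FunctionSpace.
   \code
   // one stored value shared by all data points
   escript::Data c(1.5, escript::DataTypes::ShapeType(), fs, false);
   // the same value, but stored separately for every data point
   escript::Data e(1.5, escript::DataTypes::ShapeType(), fs, true);
   \endcode
*/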
137
138 /**
139 \brief
140 Constructor which performs a deep copy of a region from another Data object.
141
142 \param inData - Input - Input Data object.
143 \param region - Input - Region to copy.
144 */
145 ESCRIPT_DLL_API
146 Data(const Data& inData,
147 const DataTypes::RegionType& region);
148
149 /**
150 \brief
151 Constructor which copies data from any object that can be treated like a python array/sequence.
152
153 \param value - Input - Input data.
154 \param what - Input - A description of what this data represents.
155 \param expanded - Input - Flag, if true fill the entire container with
156 the value. Otherwise a more efficient storage
157 mechanism will be used.
158 */
159 ESCRIPT_DLL_API
160 Data(const boost::python::object& value,
161 const FunctionSpace& what=FunctionSpace(),
162 bool expanded=false);
163
164 /**
165 \brief
166 Constructor which copies data from a wrapped array.
167
168 \param w - Input - Input data.
169 \param what - Input - A description of what this data represents.
170 \param expanded - Input - Flag, if true fill the entire container with
171 the value. Otherwise a more efficient storage
172 mechanism will be used.
173 */
174 ESCRIPT_DLL_API
175 Data(const WrappedArray& w, const FunctionSpace& what,
176 bool expanded=false);
177
178
179 /**
180 \brief
181 Constructor which creates a DataConstant.
182 Copies data from any object that can be treated like a python array/sequence.
183 All other parameters are copied from other.
184
185 \param value - Input - Input data.
186 \param other - Input - contains all other parameters.
187 */
188 ESCRIPT_DLL_API
189 Data(const boost::python::object& value,
190 const Data& other);
191
192 /**
193 \brief
194 Constructor which creates a DataConstant of "shape" with constant value.
195 */
196 ESCRIPT_DLL_API
197 Data(double value,
198 const boost::python::tuple& shape=boost::python::make_tuple(),
199 const FunctionSpace& what=FunctionSpace(),
200 bool expanded=false);
201
202 /**
203 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
204 Once you have passed the pointer, do not delete it.
205 */
206 ESCRIPT_DLL_API
207 explicit Data(DataAbstract* underlyingdata);
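/**
   Usage sketch (editorial addition) for the ownership rule of the raw-pointer
   constructor above; makeSomeDataAbstract() is a hypothetical factory.
   \code
   DataAbstract* da = makeSomeDataAbstract();
   escript::Data d(da);   // d now owns da
   // do NOT delete da here - the Data object manages its lifetime
   \endcode
*/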
208
209 /**
210 \brief Create a Data based on the supplied DataAbstract
211 */
212 ESCRIPT_DLL_API
213 explicit Data(DataAbstract_ptr underlyingdata);
214
215 /**
216 \brief
217 Destructor
218 */
219 ESCRIPT_DLL_API
220 ~Data();
221
222 /**
223 \brief Make this object a deep copy of "other".
224 */
225 ESCRIPT_DLL_API
226 void
227 copy(const Data& other);
228
229 /**
230 \brief Return a pointer to a deep copy of this object.
231 */
232 ESCRIPT_DLL_API
233 Data
234 copySelf();
235
236
237 /**
238 \brief produce a delayed evaluation version of this Data.
239 */
240 ESCRIPT_DLL_API
241 Data
242 delay();
243
244 /**
245 \brief convert the current data into lazy data.
246 */
247 ESCRIPT_DLL_API
248 void
249 delaySelf();
250
251
252 /**
253 Member access methods.
254 */
255
256 /**
257 \brief
258 switches on update protection
259
260 */
261 ESCRIPT_DLL_API
262 void
263 setProtection();
264
265 /**
266 \brief
267 Returns true if the data object is protected against updates
268
269 */
270 ESCRIPT_DLL_API
271 bool
272 isProtected() const;
273
274
275 /**
276 \brief
277 Return the value of a data point as a python tuple.
278 */
279 ESCRIPT_DLL_API
280 const boost::python::object
281 getValueOfDataPointAsTuple(int dataPointNo);
282
283 /**
284 \brief
285 sets the values of a data-point from a python object on this process
286 */
287 ESCRIPT_DLL_API
288 void
289 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
290
291 /**
292 \brief
293 sets the values of a data-point from an array-like object on this process
294 */
295 ESCRIPT_DLL_API
296 void
297 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
298
299 /**
300 \brief
301 sets the values of a data-point on this process
302 */
303 ESCRIPT_DLL_API
304 void
305 setValueOfDataPoint(int dataPointNo, const double);
306
307 /**
308 \brief Return a data point across all processors as a python tuple.
309 */
310 ESCRIPT_DLL_API
311 const boost::python::object
312 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
313
314
315 /**
316 \brief Set the value of a global data point
317 */
318 ESCRIPT_DLL_API
319 void
320 setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
321
322 /**
323 \brief
324 Return the tag number associated with the given data-point.
325
326 */
327 ESCRIPT_DLL_API
328 int
329 getTagNumber(int dpno);
330
331 /**
332 \brief
333 Return the C wrapper for the Data object.
334 */
335 ESCRIPT_DLL_API
336 escriptDataC
337 getDataC();
338
339
340
341 /**
342 \brief
343 Return the C wrapper for the Data object - const version.
344 */
345 ESCRIPT_DLL_API
346 escriptDataC
347 getDataC() const;
348
349
350 /**
351 \brief
352 Write the data as a string. For large amounts of data, a summary is printed.
353 */
354 ESCRIPT_DLL_API
355 std::string
356 toString() const;
357
358 /**
359 \brief
360 Whatever the current Data type make this into a DataExpanded.
361 */
362 ESCRIPT_DLL_API
363 void
364 expand();
365
366 /**
367 \brief
368 If possible convert this Data to DataTagged. This will only allow
369 Constant data to be converted to tagged. An attempt to convert
370 Expanded data to tagged will throw an exception.
371 */
372 ESCRIPT_DLL_API
373 void
374 tag();
375
376 /**
377 \brief If this data is lazy, then convert it to ready data.
378 The type of ready data produced depends on the expression. For example, Constant+Tagged==Tagged.
379 */
380 ESCRIPT_DLL_API
381 void
382 resolve();
383
384
385 /**
386 \brief Ensures data is ready for write access.
387 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
388 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
389 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
390 Doing so might introduce additional sharing.
391 */
392 ESCRIPT_DLL_API
393 void
394 requireWrite();
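/**
   Usage sketch (editorial addition). A write pattern assumed from the warnings above:
   call requireWrite() in serial code, then touch samples in parallel; "d" is an
   assumed Data object.
   \code
   d.requireWrite();                        // resolve lazy data / break sharing (serial!)
   #pragma omp parallel for
   for (int s = 0; s < d.getNumSamples(); ++s)
   {
       double* p = d.getSampleDataRW(s);    // safe now that requireWrite() was called
       // ... modify the values of sample s through p ...
   }
   \endcode
*/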
395
396 /**
397 \brief
398 Return true if this Data is expanded.
399 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded() instead.
400 */
401 ESCRIPT_DLL_API
402 bool
403 isExpanded() const;
404
405 /**
406 \brief
407 Return true if this Data is expanded or resolves to expanded.
408 That is, if it has a separate value for each datapoint in the sample.
409 */
410 ESCRIPT_DLL_API
411 bool
412 actsExpanded() const;
413
414
415 /**
416 \brief
417 Return true if this Data is tagged.
418 */
419 ESCRIPT_DLL_API
420 bool
421 isTagged() const;
422
423 /**
424 \brief
425 Return true if this Data is constant.
426 */
427 ESCRIPT_DLL_API
428 bool
429 isConstant() const;
430
431 /**
432 \brief Return true if this Data is lazy.
433 */
434 ESCRIPT_DLL_API
435 bool
436 isLazy() const;
437
438 /**
439 \brief Return true if this data is ready.
440 */
441 ESCRIPT_DLL_API
442 bool
443 isReady() const;
444
445 /**
446 \brief
447 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
448 contains datapoints.
449 */
450 ESCRIPT_DLL_API
451 bool
452 isEmpty() const;
453
454 /**
455 \brief
456 Return the function space.
457 */
458 ESCRIPT_DLL_API
459 inline
460 const FunctionSpace&
461 getFunctionSpace() const
462 {
463 return m_data->getFunctionSpace();
464 }
465
466 /**
467 \brief
468 Return the domain.
469 */
470 ESCRIPT_DLL_API
471 inline
472 // const AbstractDomain&
473 const_Domain_ptr
474 getDomain() const
475 {
476 return getFunctionSpace().getDomain();
477 }
478
479
480 /**
481 \brief
482 Return the domain.
483 TODO: For internal use only. This should be removed.
484 */
485 ESCRIPT_DLL_API
486 inline
487 // const AbstractDomain&
488 Domain_ptr
489 getDomainPython() const
490 {
491 return getFunctionSpace().getDomainPython();
492 }
493
494 /**
495 \brief
496 Return the rank of the point data.
497 */
498 ESCRIPT_DLL_API
499 inline
500 unsigned int
501 getDataPointRank() const
502 {
503 return m_data->getRank();
504 }
505
506 /**
507 \brief
508 Return the number of data points
509 */
510 ESCRIPT_DLL_API
511 inline
512 int
513 getNumDataPoints() const
514 {
515 return getNumSamples() * getNumDataPointsPerSample();
516 }
517 /**
518 \brief
519 Return the number of samples.
520 */
521 ESCRIPT_DLL_API
522 inline
523 int
524 getNumSamples() const
525 {
526 return m_data->getNumSamples();
527 }
528
529 /**
530 \brief
531 Return the number of data points per sample.
532 */
533 ESCRIPT_DLL_API
534 inline
535 int
536 getNumDataPointsPerSample() const
537 {
538 return m_data->getNumDPPSample();
539 }
540
541 /**
542 \brief
543 Returns true if the number of data points per sample and the number of
544 samples match the respective arguments. DataEmpty always returns true.
545 */
546 ESCRIPT_DLL_API
547 inline
548 bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
549 {
550 return (isEmpty() ||
551 (numDataPointsPerSample==getNumDataPointsPerSample() && numSamples==getNumSamples()));
552 }
553
554 /**
555 \brief
556 Returns true if the shape matches the vector (dimensions[0],...,
557 dimensions[rank-1]). DataEmpty always returns true.
558 */
559 inline
560 bool isDataPointShapeEqual(int rank, const int* dimensions) const
561 {
562 if (isEmpty())
563 return true;
564 const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
565 return (getDataPointShape()==givenShape);
566 }
567
568 /**
569 \brief
570 Return the number of values in the shape for this object.
571 */
572 ESCRIPT_DLL_API
573 int
574 getNoValues() const
575 {
576 return m_data->getNoValues();
577 }
578
579
580 /**
581 \brief
582 dumps the object into a netCDF file
583 */
584 ESCRIPT_DLL_API
585 void
586 dump(const std::string fileName) const;
587
588 /**
589 \brief returns the values of the object as a list of tuples (one for each datapoint).
590
591 \param scalarastuple If true, scalar data will produce single-valued tuples [(1,), (2,), ...]
592 If false, the result is a list of scalars [1, 2, ...]
593 */
594 ESCRIPT_DLL_API
595 const boost::python::object
596 toListOfTuples(bool scalarastuple=true);
597
598
599 /**
600 \brief
601 Return the sample data for the given sample no.
602 Please do not use this unless you NEED to access samples individually
603 \param sampleNo - Input - the given sample no.
604 \return pointer to the sample data.
605 */
606 ESCRIPT_DLL_API
607 inline
608 const DataAbstract::ValueType::value_type*
609 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const;
610
611
612 /**
613 \brief
614 Return the sample data for the given sample no.
615 Please do not use this unless you NEED to access samples individually
616 \param sampleNo - Input - the given sample no.
617 \return pointer to the sample data.
618 */
619 ESCRIPT_DLL_API
620 inline
621 DataAbstract::ValueType::value_type*
622 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
623
624
625 /**
626 \brief
627 Return a pointer to the beginning of the underlying data
628 \warning Please avoid using this method since it bypasses possible lazy-evaluation optimisations. May be removed without notice.
629 \return pointer to the data.
630 */
631 ESCRIPT_DLL_API
632 inline
633 const DataAbstract::ValueType::value_type*
634 getDataRO() const;
635
636 /**
637 \brief
638 Return the sample data for the given tag. If an attempt is made to
639 access data that isn't tagged an exception will be thrown.
640 \param tag - Input - the tag key.
641 */
642 ESCRIPT_DLL_API
643 inline
644 DataAbstract::ValueType::value_type*
645 getSampleDataByTag(int tag)
646 {
647 return m_data->getSampleDataByTag(tag);
648 }
649
650 /**
651 \brief
652 Return a reference into the DataVector which points to the specified data point.
653 \param sampleNo - Input -
654 \param dataPointNo - Input -
655 */
656 ESCRIPT_DLL_API
657 DataTypes::ValueType::const_reference
658 getDataPointRO(int sampleNo, int dataPointNo);
659
660 /**
661 \brief
662 Return a reference into the DataVector which points to the specified data point.
663 \param sampleNo - Input -
664 \param dataPointNo - Input -
665 */
666 ESCRIPT_DLL_API
667 DataTypes::ValueType::reference
668 getDataPointRW(int sampleNo, int dataPointNo);
669
670
671
672 /**
673 \brief
674 Return the offset for the given sample and point within the sample
675 */
676 ESCRIPT_DLL_API
677 inline
678 DataTypes::ValueType::size_type
679 getDataOffset(int sampleNo,
680 int dataPointNo)
681 {
682 return m_data->getPointOffset(sampleNo,dataPointNo);
683 }
684
685 /**
686 \brief
687 Return a reference to the data point shape.
688 */
689 ESCRIPT_DLL_API
690 inline
691 const DataTypes::ShapeType&
692 getDataPointShape() const
693 {
694 return m_data->getShape();
695 }
696
697 /**
698 \brief
699 Return the data point shape as a tuple of integers.
700 */
701 ESCRIPT_DLL_API
702 const boost::python::tuple
703 getShapeTuple() const;
704
705 /**
706 \brief
707 Return the size of the data point. It is the product of the
708 data point shape dimensions.
709 */
710 ESCRIPT_DLL_API
711 int
712 getDataPointSize() const;
713
714 /**
715 \brief
716 Return the number of doubles stored for this Data.
717 */
718 ESCRIPT_DLL_API
719 DataTypes::ValueType::size_type
720 getLength() const;
721
722 /**
723 \brief Return true if this object contains no samples.
724 This is not the same as isEmpty()
725 */
726 ESCRIPT_DLL_API
727 bool
728 hasNoSamples() const
729 {
730 return getLength()==0;
731 }
732
733 /**
734 \brief
735 Assign the given value to the tag associated with name. Implicitly converts this
736 object to type DataTagged. Throws an exception if this object
737 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
738 \param name - Input - name of tag.
739 \param value - Input - Value to associate with given key.
740 */
741 ESCRIPT_DLL_API
742 void
743 setTaggedValueByName(std::string name,
744 const boost::python::object& value);
745
746 /**
747 \brief
748 Assign the given value to the tag. Implicitly converts this
749 object to type DataTagged if it is constant.
750
751 \param tagKey - Input - Integer key.
752 \param value - Input - Value to associate with given key.
753
754 */
755 ESCRIPT_DLL_API
756 void
757 setTaggedValue(int tagKey,
758 const boost::python::object& value);
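/**
   Usage sketch (editorial addition). Assigning a value to tag key 1; "d" is an
   assumed Data object that is constant or tagged.
   \code
   d.tag();                                            // ensure DataTagged storage
   d.setTaggedValue(1, boost::python::object(42.0));   // used wherever tag 1 applies
   \endcode
*/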
759
760 /**
761 \brief
762 Assign the given value to the tag. Implicitly converts this
763 object to type DataTagged if it is constant.
764
765 \param tagKey - Input - Integer key.
766 \param pointshape - Input - The shape of the value parameter
767 \param value - Input - Value to associate with given key.
768 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
769 */
770 ESCRIPT_DLL_API
771 void
772 setTaggedValueFromCPP(int tagKey,
773 const DataTypes::ShapeType& pointshape,
774 const DataTypes::ValueType& value,
775 int dataOffset=0);
776
777
778
779 /**
780 \brief
781 Copy the other Data object into this Data object wherever mask is positive.
782 */
783 ESCRIPT_DLL_API
784 void
785 copyWithMask(const Data& other,
786 const Data& mask);
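/**
   Usage sketch (editorial addition). "other" and "mask" are assumed Data objects
   with compatible shape and function space.
   \code
   // overwrite d with values from other wherever mask is positive
   d.copyWithMask(other, mask);
   \endcode
*/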
787
788 /**
789 Data object operation methods and operators.
790 */
791
792 /**
793 \brief
794 set all values to zero
795 *
796 */
797 ESCRIPT_DLL_API
798 void
799 setToZero();
800
801 /**
802 \brief
803 Interpolates this onto the given functionspace and returns
804 the result as a Data object.
805 *
806 */
807 ESCRIPT_DLL_API
808 Data
809 interpolate(const FunctionSpace& functionspace) const;
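/**
   Usage sketch (editorial addition). "fs2" is an assumed target FunctionSpace.
   \code
   if (d.probeInterpolation(fs2))              // check the interpolation is supported
   {
       escript::Data d2 = d.interpolate(fs2);  // result lives on fs2
   }
   \endcode
*/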
810
811 ESCRIPT_DLL_API
812 Data
813 interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
814 double undef, Data& B, double Bmin, double Bstep, Data& C,
815 double Cmin, double Cstep, bool check_boundaries);
816
817 ESCRIPT_DLL_API
818 Data
819 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
820 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
821
822 ESCRIPT_DLL_API
823 Data
824 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
825 double undef,bool check_boundaries);
826
827
828 ESCRIPT_DLL_API
829 Data
830 interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
831 Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
832
833
834 ESCRIPT_DLL_API
835 Data
836 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
837 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
838
839 ESCRIPT_DLL_API
840 Data
841 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
842 double undef,bool check_boundaries);
843
844 ESCRIPT_DLL_API
845 Data
846 nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
847
848 ESCRIPT_DLL_API
849 Data
850 nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
851
852 /**
853 \brief
854 Calculates the gradient of the data at the data points of functionspace.
855 If functionspace is not present the function space of Function(getDomain()) is used.
856 *
857 */
858 ESCRIPT_DLL_API
859 Data
860 gradOn(const FunctionSpace& functionspace) const;
861
862 ESCRIPT_DLL_API
863 Data
864 grad() const;
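/**
   Usage sketch (editorial addition). "u" is an assumed Data object and "fs" an
   assumed FunctionSpace on the same domain.
   \code
   escript::Data g  = u.grad();      // gradient on Function(getDomain())
   escript::Data g2 = u.gradOn(fs);  // gradient evaluated on a chosen function space
   \endcode
*/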
865
866 /**
867 \brief
868 Calculate the integral over the function space domain as a python tuple.
869 */
870 ESCRIPT_DLL_API
871 boost::python::object
872 integrateToTuple_const() const;
873
874
875 /**
876 \brief
877 Calculate the integral over the function space domain as a python tuple.
878 */
879 ESCRIPT_DLL_API
880 boost::python::object
881 integrateToTuple();
882
883
884
885 /**
886 \brief
887 Returns the reciprocal (1./) of this Data object
888 *
889 */
890 ESCRIPT_DLL_API
891 Data
892 oneOver() const;
893 /**
894 \brief
895 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
896 *
897 */
898 ESCRIPT_DLL_API
899 Data
900 wherePositive() const;
901
902 /**
903 \brief
904 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
905 *
906 */
907 ESCRIPT_DLL_API
908 Data
909 whereNegative() const;
910
911 /**
912 \brief
913 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
914 *
915 */
916 ESCRIPT_DLL_API
917 Data
918 whereNonNegative() const;
919
920 /**
921 \brief
922 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
923 *
924 */
925 ESCRIPT_DLL_API
926 Data
927 whereNonPositive() const;
928
929 /**
930 \brief
931 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
932 *
933 */
934 ESCRIPT_DLL_API
935 Data
936 whereZero(double tol=0.0) const;
937
938 /**
939 \brief
940 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
941 *
942 */
943 ESCRIPT_DLL_API
944 Data
945 whereNonZero(double tol=0.0) const;
946
947 /**
948 \brief
949 Return the maximum absolute value of this Data object.
950
951 The method is not const because lazy data needs to be expanded before Lsup can be computed.
952 The _const form can be used when the Data object is const, however this will only work for
953 Data which is not Lazy.
954
955 For Data which contain no samples (or tagged Data for which no tags in use have a value)
956 zero is returned.
957 */
958 ESCRIPT_DLL_API
959 double
960 Lsup();
961
962 ESCRIPT_DLL_API
963 double
964 Lsup_const() const;
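/**
   Usage sketch (editorial addition) for the reductions above; "d" is an assumed
   Data object.
   \code
   double m = d.Lsup();           // maximum absolute value (resolves lazy data first)
   const escript::Data& cd = d;
   double mc = cd.Lsup_const();   // const form - only valid for non-lazy data
   \endcode
*/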
965
966
967 /**
968 \brief
969 Return the maximum value of this Data object.
970
971 The method is not const because lazy data needs to be expanded before sup can be computed.
972 The _const form can be used when the Data object is const, however this will only work for
973 Data which is not Lazy.
974
975 For Data which contain no samples (or tagged Data for which no tags in use have a value)
976 a large negative value is returned.
977 */
978 ESCRIPT_DLL_API
979 double
980 sup();
981
982 ESCRIPT_DLL_API
983 double
984 sup_const() const;
985
986
987 /**
988 \brief
989 Return the minimum value of this Data object.
990
991 The method is not const because lazy data needs to be expanded before inf can be computed.
992 The _const form can be used when the Data object is const, however this will only work for
993 Data which is not Lazy.
994
995 For Data which contain no samples (or tagged Data for which no tags in use have a value)
996 a large positive value is returned.
997 */
998 ESCRIPT_DLL_API
999 double
1000 inf();
1001
1002 ESCRIPT_DLL_API
1003 double
1004 inf_const() const;
1005
1006
1007
1008 /**
1009 \brief
1010 Return the absolute value of each data point of this Data object.
1011 *
1012 */
1013 ESCRIPT_DLL_API
1014 Data
1015 abs() const;
1016
1017 /**
1018 \brief
1019 Return the maximum value of each data point of this Data object.
1020 *
1021 */
1022 ESCRIPT_DLL_API
1023 Data
1024 maxval() const;
1025
1026 /**
1027 \brief
1028 Return the minimum value of each data point of this Data object.
1029 *
1030 */
1031 ESCRIPT_DLL_API
1032 Data
1033 minval() const;
1034
1035 /**
1036 \brief
1037 Return the (sample number, data-point number) of the data point with
1038 the minimum component value in this Data object.
1039 \note If you are working in python, please consider using Locator
1040 instead of manually manipulating process and point IDs.
1041 */
1042 ESCRIPT_DLL_API
1043 const boost::python::tuple
1044 minGlobalDataPoint() const;
1045
1046 /**
1047 \brief
1048 Return the (sample number, data-point number) of the data point with
1049 the maximum component value in this Data object.
1050 \note If you are working in python, please consider using Locator
1051 instead of manually manipulating process and point IDs.
1052 */
1053 ESCRIPT_DLL_API
1054 const boost::python::tuple
1055 maxGlobalDataPoint() const;
1056
1057
1058
1059 /**
1060 \brief
1061 Return the sign of each data point of this Data object.
1062 -1 for negative values, zero for zero values, 1 for positive values.
1063 *
1064 */
1065 ESCRIPT_DLL_API
1066 Data
1067 sign() const;
1068
1069 /**
1070 \brief
1071 Return the symmetric part of a matrix, which is (the matrix plus its transpose) divided by two.
1072 *
1073 */
1074 ESCRIPT_DLL_API
1075 Data
1076 symmetric() const;
1077
1078 /**
1079 \brief
1080 Return the nonsymmetric part of a matrix, which is (the matrix minus its transpose) divided by two.
1081 *
1082 */
1083 ESCRIPT_DLL_API
1084 Data
1085 nonsymmetric() const;
1086
1087 /**
1088 \brief
1089 Return the trace of a matrix
1090 *
1091 */
1092 ESCRIPT_DLL_API
1093 Data
1094 trace(int axis_offset) const;
1095
1096 /**
1097 \brief
1098 Transpose each data point of this Data object around the given axis.
1099 *
1100 */
1101 ESCRIPT_DLL_API
1102 Data
1103 transpose(int axis_offset) const;
1104
1105 /**
1106 \brief
1107 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1108 Currently this function is restricted to rank 2, square shape, and dimension 3.
1109 *
1110 */
1111 ESCRIPT_DLL_API
1112 Data
1113 eigenvalues() const;
1114
1115 /**
1116 \brief
1117 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1118 The eigenvalues are ordered in increasing size, where eigenvalues with relative difference less than
1119 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1120 first non-zero entry is positive.
1121 Currently this function is restricted to rank 2, square shape, and dimension 3
1122 *
1123 */
1124 ESCRIPT_DLL_API
1125 const boost::python::tuple
1126 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
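/**
   Usage sketch (editorial addition). "s" is an assumed Data object with rank-2,
   3x3 data points.
   \code
   escript::Data ev = s.eigenvalues();                       // eigenvalues only
   const boost::python::tuple vv = s.eigenvalues_and_eigenvectors(1.e-12);
   // vv packs the eigenvalues and the corresponding eigenvectors
   \endcode
*/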
1127
1128 /**
1129 \brief
1130 swaps the components axis0 and axis1
1131 *
1132 */
1133 ESCRIPT_DLL_API
1134 Data
1135 swapaxes(const int axis0, const int axis1) const;
1136
1137 /**
1138 \brief
1139 Return the error function erf of each data point of this Data object.
1140 *
1141 */
1142 ESCRIPT_DLL_API
1143 Data
1144 erf() const;
1145
1146 /**
1147 \brief
1148 Return the sin of each data point of this Data object.
1149 *
1150 */
1151 ESCRIPT_DLL_API
1152 Data
1153 sin() const;
1154
1155 /**
1156 \brief
1157 Return the cos of each data point of this Data object.
1158 *
1159 */
1160 ESCRIPT_DLL_API
1161 Data
1162 cos() const;
1163
1164 /**
1165 \brief
1166 Return the tan of each data point of this Data object.
1167 *
1168 */
1169 ESCRIPT_DLL_API
1170 Data
1171 tan() const;
1172
1173 /**
1174 \brief
1175 Return the asin of each data point of this Data object.
1176 *
1177 */
1178 ESCRIPT_DLL_API
1179 Data
1180 asin() const;
1181
1182 /**
1183 \brief
1184 Return the acos of each data point of this Data object.
1185 *
1186 */
1187 ESCRIPT_DLL_API
1188 Data
1189 acos() const;
1190
1191 /**
1192 \brief
1193 Return the atan of each data point of this Data object.
1194 *
1195 */
1196 ESCRIPT_DLL_API
1197 Data
1198 atan() const;
1199
1200 /**
1201 \brief
1202 Return the sinh of each data point of this Data object.
1203 *
1204 */
1205 ESCRIPT_DLL_API
1206 Data
1207 sinh() const;
1208
1209 /**
1210 \brief
1211 Return the cosh of each data point of this Data object.
1212 *
1213 */
1214 ESCRIPT_DLL_API
1215 Data
1216 cosh() const;
1217
1218 /**
1219 \brief
1220 Return the tanh of each data point of this Data object.
1221 *
1222 */
1223 ESCRIPT_DLL_API
1224 Data
1225 tanh() const;
1226
1227 /**
1228 \brief
1229 Return the asinh of each data point of this Data object.
1230 *
1231 */
1232 ESCRIPT_DLL_API
1233 Data
1234 asinh() const;
1235
1236 /**
1237 \brief
1238 Return the acosh of each data point of this Data object.
1239 *
1240 */
1241 ESCRIPT_DLL_API
1242 Data
1243 acosh() const;
1244
1245 /**
1246 \brief
1247 Return the atanh of each data point of this Data object.
1248 *
1249 */
1250 ESCRIPT_DLL_API
1251 Data
1252 atanh() const;
1253
1254 /**
1255 \brief
1256 Return the log to base 10 of each data point of this Data object.
1257 *
1258 */
1259 ESCRIPT_DLL_API
1260 Data
1261 log10() const;
1262
1263 /**
1264 \brief
1265 Return the natural log of each data point of this Data object.
1266 *
1267 */
1268 ESCRIPT_DLL_API
1269 Data
1270 log() const;
1271
1272 /**
1273 \brief
1274 Return the exponential function of each data point of this Data object.
1275 *
1276 */
1277 ESCRIPT_DLL_API
1278 Data
1279 exp() const;
1280
1281 /**
1282 \brief
1283 Return the square root of each data point of this Data object.
1284 *
1285 */
1286 ESCRIPT_DLL_API
1287 Data
1288 sqrt() const;
1289
1290 /**
1291 \brief
1292 Return the negation of each data point of this Data object.
1293 *
1294 */
1295 ESCRIPT_DLL_API
1296 Data
1297 neg() const;
1298
1299 /**
1300 \brief
1301 Return the identity of each data point of this Data object.
1302 Simply returns this object unmodified.
1303 *
1304 */
1305 ESCRIPT_DLL_API
1306 Data
1307 pos() const;
1308
1309 /**
1310 \brief
1311 Return the given power of each data point of this Data object.
1312
1313 \param right Input - the power to raise the object to.
1314 *
1315 */
1316 ESCRIPT_DLL_API
1317 Data
1318 powD(const Data& right) const;
1319
1320 /**
1321 \brief
1322 Return each data point of this Data object raised to the power given by a boost python object.
1323
1324 \param right Input - the power to raise the object to.
1325 *
1326 */
1327 ESCRIPT_DLL_API
1328 Data
1329 powO(const boost::python::object& right) const;
1330
1331 /**
1332 \brief
1333 Return the given boost python object raised to the power of each data point of this Data object.
1334
1335 \param left Input - the bases
1336 *
1337 */
1338
1339 ESCRIPT_DLL_API
1340 Data
1341 rpowO(const boost::python::object& left) const;
1342
1343 /**
1344 \brief
1345 Overloaded operator +=
1346 \param right - Input - The right hand side.
1347 *
1348 */
1349 ESCRIPT_DLL_API
1350 Data& operator+=(const Data& right);
1351 ESCRIPT_DLL_API
1352 Data& operator+=(const boost::python::object& right);
1353
1354 ESCRIPT_DLL_API
1355 Data& operator=(const Data& other);
1356
1357 /**
1358 \brief
1359 Overloaded operator -=
1360 \param right - Input - The right hand side.
1361 *
1362 */
1363 ESCRIPT_DLL_API
1364 Data& operator-=(const Data& right);
1365 ESCRIPT_DLL_API
1366 Data& operator-=(const boost::python::object& right);
1367
1368 /**
1369 \brief
1370 Overloaded operator *=
1371 \param right - Input - The right hand side.
1372 *
1373 */
1374 ESCRIPT_DLL_API
1375 Data& operator*=(const Data& right);
1376 ESCRIPT_DLL_API
1377 Data& operator*=(const boost::python::object& right);
1378
1379 /**
1380 \brief
1381 Overloaded operator /=
1382 \param right - Input - The right hand side.
1383 *
1384 */
1385 ESCRIPT_DLL_API
1386 Data& operator/=(const Data& right);
1387 ESCRIPT_DLL_API
1388 Data& operator/=(const boost::python::object& right);
1389
1390 /**
1391 \brief
1392 Newer style division operator for python
1393 */
1394 ESCRIPT_DLL_API
1395 Data truedivD(const Data& right);
1396
1397 /**
1398 \brief
1399 Newer style division operator for python
1400 */
1401 ESCRIPT_DLL_API
1402 Data truedivO(const boost::python::object& right);
1403
1404 /**
1405 \brief
1406 Newer style division operator for python
1407 */
1408 ESCRIPT_DLL_API
1409 Data rtruedivO(const boost::python::object& left);
1410
1411 /**
1412 \brief
1413 wrapper for python add operation
1414 */
1415 ESCRIPT_DLL_API
1416 boost::python::object __add__(const boost::python::object& right);
1417
1418
1419 /**
1420 \brief
1421 wrapper for python subtract operation
1422 */
1423 ESCRIPT_DLL_API
1424 boost::python::object __sub__(const boost::python::object& right);
1425
1426 /**
1427 \brief
1428 wrapper for python reverse subtract operation
1429 */
1430 ESCRIPT_DLL_API
1431 boost::python::object __rsub__(const boost::python::object& right);
1432
1433 /**
1434 \brief
1435 wrapper for python multiply operation
1436 */
1437 ESCRIPT_DLL_API
1438 boost::python::object __mul__(const boost::python::object& right);
1439
1440 /**
1441 \brief
1442 wrapper for python divide operation
1443 */
1444 ESCRIPT_DLL_API
1445 boost::python::object __div__(const boost::python::object& right);
1446
1447 /**
1448 \brief
1449 wrapper for python reverse divide operation
1450 */
1451 ESCRIPT_DLL_API
1452 boost::python::object __rdiv__(const boost::python::object& right);
1453
1454 /**
1455 \brief Return the inverse of the matrix at each data point.
1456 */
1457 ESCRIPT_DLL_API
1458 Data
1459 matrixInverse() const;
1460
1461 /**
1462 \brief
1463 Returns true if this can be interpolated to functionspace.
1464 */
1465 ESCRIPT_DLL_API
1466 bool
1467 probeInterpolation(const FunctionSpace& functionspace) const;
1468
1469 /**
1470 Data object slicing methods.
1471 */
1472
1473 /**
1474 \brief
1475 Returns a slice from this Data object.
1476
1477 Description:
1478 Implements the [] get operator in python.
1479 Calls getSlice.
1480
1481 \param key - Input - python slice tuple specifying
1482 slice to return.
1483 */
1484 ESCRIPT_DLL_API
1485 Data
1486 getItem(const boost::python::object& key) const;
1487
1488 /**
1489 \brief
1490 Copies slice from value into this Data object.
1491
1492 Implements the [] set operator in python.
1493 Calls setSlice.
1494
1495 \param key - Input - python slice tuple specifying
1496 slice to copy from value.
1497 \param value - Input - Data object to copy from.
1498 */
1499 ESCRIPT_DLL_API
1500 void
1501 setItemD(const boost::python::object& key,
1502 const Data& value);
1503
1504 ESCRIPT_DLL_API
1505 void
1506 setItemO(const boost::python::object& key,
1507 const boost::python::object& value);
1508
1509 // These following public methods should be treated as private.
1510
1511 /**
1512 \brief
1513 Perform the given unary operation on every element of every data point in
1514 this Data object.
1515 */
1516 template <class UnaryFunction>
1517 ESCRIPT_DLL_API
1518 inline
1519 void
1520 unaryOp2(UnaryFunction operation);
1521
1522 /**
1523 \brief
1524 Return a Data object containing the specified slice of
1525 this Data object.
1526 \param region - Input - Region to copy.
1527 *
1528 */
1529 ESCRIPT_DLL_API
1530 Data
1531 getSlice(const DataTypes::RegionType& region) const;
1532
1533 /**
1534 \brief
1535 Copy the specified slice from the given value into this
1536 Data object.
1537 \param value - Input - Data to copy from.
1538 \param region - Input - Region to copy.
1539 *
1540 */
1541 ESCRIPT_DLL_API
1542 void
1543 setSlice(const Data& value,
1544 const DataTypes::RegionType& region);
1545
1546 /**
1547 \brief
1548 print the data values to stdout. Used for debugging
1549 */
1550 ESCRIPT_DLL_API
1551 void
1552 print(void);
1553
1554 /**
1555 \brief
1556 return the MPI rank of the local data.
1557 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1558 is returned
1559 */
1560 ESCRIPT_DLL_API
1561 int
1562 get_MPIRank(void) const;
1563
1564 /**
1565 \brief
1566 return the number of MPI processes for the local data.
1567 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1568 is returned
1569 */
1570 ESCRIPT_DLL_API
1571 int
1572 get_MPISize(void) const;
1573
1574 /**
1575 \brief
1576 return the MPI communicator used by the local data.
1577 MPI_COMM_WORLD is assumed and returned.
1578 */
1579 ESCRIPT_DLL_API
1580 MPI_Comm
1581 get_MPIComm(void) const;
1582
1583 /**
1584 \brief
1585 return the object produced by the factory, which is a DataConstant or DataExpanded
1586 TODO Ownership of this object should be explained in the documentation.
1587 */
1588 ESCRIPT_DLL_API
1589 DataAbstract*
1590 borrowData(void) const;
1591
1592 ESCRIPT_DLL_API
1593 DataAbstract_ptr
1594 borrowDataPtr(void) const;
1595
1596 ESCRIPT_DLL_API
1597 DataReady_ptr
1598 borrowReadyPtr(void) const;
1599
1600
1601
1602 /**
1603 \brief
1604 Return a pointer to the beginning of the datapoint at the specified offset.
1605 TODO Eventually these should be inlined.
1606 \param i - position (offset) in the underlying data structure
1607 */
1608
1609 ESCRIPT_DLL_API
1610 DataTypes::ValueType::const_reference
1611 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1612
1613
1614 ESCRIPT_DLL_API
1615 DataTypes::ValueType::reference
1616 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1617
1618 /**
1619 \brief Ensures that the Data is expanded and returns its underlying vector.
1620 Does not check for exclusive write, so do that before calling if sharing
1621 is a possibility.
1622 \warning For domain implementors only. Using this function will
1623 avoid using optimisations like lazy evaluation. It is intended
1624 to allow quick initialisation of Data by domain; not as a bypass around
1625 escript's other mechanisms.
1626 */
1627 ESCRIPT_DLL_API
1628 DataTypes::ValueType&
1629 getExpandedVectorReference();
1630
1631 protected:
1632
1633 private:
1634
1635 template <class BinaryOp>
1636 double
1637 #ifdef ESYS_MPI
1638 lazyAlgWorker(double init, MPI_Op mpiop_type);
1639 #else
1640 lazyAlgWorker(double init);
1641 #endif
1642
1643 double
1644 LsupWorker() const;
1645
1646 double
1647 supWorker() const;
1648
1649 double
1650 infWorker() const;
1651
1652 boost::python::object
1653 integrateWorker() const;
1654
1655 void
1656 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1657
1658 void
1659 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1660
1661 // For internal use in Data.cpp only!
1662 // other uses should call the main entry points and allow laziness
1663 Data
1664 minval_nonlazy() const;
1665
1666 // For internal use in Data.cpp only!
1667 Data
1668 maxval_nonlazy() const;
1669
1670
1671 /**
1672 \brief
1673 Check *this and the right operand are compatible. Throws
1674 an exception if they aren't.
1675 \param right - Input - The right hand side.
1676 */
1677 inline
1678 void
1679 operandCheck(const Data& right) const
1680 {
1681 return m_data->operandCheck(*(right.m_data.get()));
1682 }
1683
1684 /**
1685 \brief
1686 Perform the specified reduction algorithm on every element of every data point in
1687 this Data object according to the given function and return the single value result.
1688 */
1689 template <class BinaryFunction>
1690 inline
1691 double
1692 algorithm(BinaryFunction operation,
1693 double initial_value) const;
1694
1695 /**
1696 \brief
1697 Reduce each data-point in this Data object using the given operation. Return a Data
1698 object with the same number of data-points, but with each data-point containing only
1699 one value - the result of the reduction operation on the corresponding data-point in
1700 this Data object
1701 */
1702 template <class BinaryFunction>
1703 inline
1704 Data
1705 dp_algorithm(BinaryFunction operation,
1706 double initial_value) const;
1707
1708 /**
1709 \brief
1710 Perform the given binary operation on all of the data's elements.
1711 The underlying type of the right hand side (right) determines the final
1712 type of *this after the operation. For example if the right hand side
1713 is expanded *this will be expanded if necessary.
1714 RHS is a Data object.
1715 */
1716 template <class BinaryFunction>
1717 inline
1718 void
1719 binaryOp(const Data& right,
1720 BinaryFunction operation);
1721
1722 /**
1723 \brief
1724 Convert the data type of the RHS to match this.
1725 \param right - Input - data type to match.
1726 */
1727 void
1728 typeMatchLeft(Data& right) const;
1729
1730 /**
1731 \brief
1732 Convert the data type of this to match the RHS.
1733 \param right - Input - data type to match.
1734 */
1735 void
1736 typeMatchRight(const Data& right);
1737
1738 /**
1739 \brief
1740 Construct a Data object of the appropriate type.
1741 */
1742
1743 void
1744 initialise(const DataTypes::ValueType& value,
1745 const DataTypes::ShapeType& shape,
1746 const FunctionSpace& what,
1747 bool expanded);
1748
1749 void
1750 initialise(const WrappedArray& value,
1751 const FunctionSpace& what,
1752 bool expanded);
1753
1754 void
1755 initialise(const double value,
1756 const DataTypes::ShapeType& shape,
1757 const FunctionSpace& what,
1758 bool expanded);
1759
1760 //
1761 // flag to protect the data object against any update
1762 bool m_protected;
1763 mutable bool m_shared;
1764 bool m_lazy;
1765
1766 //
1767 // pointer to the actual data object
1768 // boost::shared_ptr<DataAbstract> m_data;
1769 DataAbstract_ptr m_data;
1770
1771 // If possible please use getReadyPtr instead.
1772 // But see warning below.
1773 const DataReady*
1774 getReady() const;
1775
1776 DataReady*
1777 getReady();
1778
1779
1780 // Be wary of using this for local operations since it (temporarily) increases reference count.
1781 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1782 // getReady() instead
1783 DataReady_ptr
1784 getReadyPtr();
1785
1786 const_DataReady_ptr
1787 getReadyPtr() const;
1788
1789
1790 /**
1791 \brief Update the Data's shared flag
1792 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1793 For internal use only.
1794 */
1795 void updateShareStatus(bool nowshared) const
1796 {
1797 m_shared=nowshared; // m_shared is mutable
1798 }
1799
1800 // In the isShared() method below:
1801 // A problem would occur if m_data (the address pointed to) were being modified
1802 // while the call m_data->is_shared is being executed.
1803 //
1804 // Q: So why do I think this code can be thread safe/correct?
1805 // A: We need to make some assumptions.
1806 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1807 // 2. We assume that no constructions or assignments which will share previously unshared
1808 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1809 //
1810 // This means that the only transition we need to consider, is when a previously shared object is
1811 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1812 // In those cases the m_shared flag changes to false after m_data has completed changing.
1813 // For any threads executing before the flag switches they will assume the object is still shared.
1814 bool isShared() const
1815 {
1816 return m_shared;
1817 /* if (m_shared) return true;
1818 if (m_data->isShared())
1819 {
1820 updateShareStatus(true);
1821 return true;
1822 }
1823 return false;*/
1824 }
1825
1826 void forceResolve()
1827 {
1828 if (isLazy())
1829 {
1830 #ifdef _OPENMP
1831 if (omp_in_parallel())
1832 { // Yes this is throwing an exception out of an omp thread which is forbidden.
1833 throw DataException("Please do not call forceResolve() in a parallel region.");
1834 }
1835 #endif
1836 resolve();
1837 }
1838 }
1839
1840 /**
1841 \brief if another object is sharing our member data, make a copy to work with instead.
1842 This code should only be called from single threaded sections of code.
1843 */
1844 void exclusiveWrite()
1845 {
1846 #ifdef _OPENMP
1847 if (omp_in_parallel())
1848 {
1849 // *((int*)0)=17;
1850 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1851 }
1852 #endif
1853 forceResolve();
1854 if (isShared())
1855 {
1856 DataAbstract* t=m_data->deepCopy();
1857 set_m_data(DataAbstract_ptr(t));
1858 }
1859 }
1860
1861 /**
1862 \brief checks if caller can have exclusive write to the object
1863 */
1864 void checkExclusiveWrite()
1865 {
1866 if (isLazy() || isShared())
1867 {
1868 throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1869 }
1870 }
1871
1872 /**
1873 \brief Modify the data abstract hosted by this Data object
1874 For internal use only.
1875 Passing a pointer to null is permitted (do this in the destructor)
1876 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1877 */
1878 void set_m_data(DataAbstract_ptr p);
1879
1880 friend class DataAbstract; // To allow calls to updateShareStatus
1881 friend class TestDomain; // so its getX will work quickly
1882 #ifdef IKNOWWHATIMDOING
1883 friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1884 #endif
1885 friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1886 friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed, const boost::python::tuple& filter);
1887
1888 };
1889
1890
1891 #ifdef IKNOWWHATIMDOING
1892 ESCRIPT_DLL_API
1893 Data
1894 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1895 #endif
1896
1897
1898 ESCRIPT_DLL_API
1899 Data
1900 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1901
1902
1903
1904 /**
1905 \brief Create a new Expanded Data object filled with pseudo-random data.
1906 */
1907 ESCRIPT_DLL_API
1908 Data randomData(const boost::python::tuple& shape,
1909 const FunctionSpace& what,
1910 long seed, const boost::python::tuple& filter);
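/**
   Usage sketch (editorial addition) for the factory above; "fs" is an assumed
   FunctionSpace and the filter tuple is left empty.
   \code
   escript::Data r = escript::randomData(boost::python::make_tuple(3, 3), fs,
                                         1234, boost::python::tuple());
   \endcode
*/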
1911
1912
1913 } // end namespace escript
1914
1915
1916 // No, this is not supposed to be at the top of the file
1917 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1918 // so that I can dynamic cast between them below.
1919 #include "DataReady.h"
1920 #include "DataLazy.h"
1921
1922 namespace escript
1923 {
1924
1925 inline
1926 const DataReady*
1927 Data::getReady() const
1928 {
1929 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1930 EsysAssert((dr!=0), "Error - casting to DataReady.");
1931 return dr;
1932 }
1933
1934 inline
1935 DataReady*
1936 Data::getReady()
1937 {
1938 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1939 EsysAssert((dr!=0), "Error - casting to DataReady.");
1940 return dr;
1941 }
1942
1943 // Be wary of using this for local operations since it (temporarily) increases reference count.
1944 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1945 // getReady() instead
1946 inline
1947 DataReady_ptr
1948 Data::getReadyPtr()
1949 {
1950 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1951 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1952 return dr;
1953 }
1954
1955
1956 inline
1957 const_DataReady_ptr
1958 Data::getReadyPtr() const
1959 {
1960 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1961 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1962 return dr;
1963 }
1964
1965 inline
1966 DataAbstract::ValueType::value_type*
1967 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1968 {
1969 if (isLazy())
1970 {
1971 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1972 }
1973 return getReady()->getSampleDataRW(sampleNo);
1974 }
1975
1976 inline
1977 const DataAbstract::ValueType::value_type*
1978 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const
1979 {
1980 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1981 if (l!=0)
1982 {
1983 size_t offset=0;
1984 const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
1985 return &((*res)[offset]);
1986 }
1987 return getReady()->getSampleDataRO(sampleNo);
1988 }
1989
1990 inline
1991 const DataAbstract::ValueType::value_type*
1992 Data::getDataRO() const
1993 {
1994 if (isLazy())
1995 {
1996 throw DataException("Programmer error - getDataRO must not be called on Lazy Data.");
1997 }
1998 if (getNumSamples()==0)
1999 {
2000 return 0;
2001 }
2002 else
2003 {
2004 return &(getReady()->getVectorRO()[0]);
2005 }
2006 }
2007
2008
2009 /**
2010 Binary Data object operators.
2011 */
2012 inline double rpow(double x,double y)
2013 {
2014 return pow(y,x);
2015 }
2016
2017 /**
2018 \brief
2019 Operator+
2020 Takes two Data objects.
2021 */
2022 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
2023
2024 /**
2025 \brief
2026 Operator-
2027 Takes two Data objects.
2028 */
2029 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
2030
2031 /**
2032 \brief
2033 Operator*
2034 Takes two Data objects.
2035 */
2036 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
2037
2038 /**
2039 \brief
2040 Operator/
2041 Takes two Data objects.
2042 */
2043 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2044
2045 /**
2046 \brief
2047 Operator+
2048 Takes LHS Data object and RHS python::object.
2049 python::object must be convertible to Data type.
2050 */
2051 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2052
2053 /**
2054 \brief
2055 Operator-
2056 Takes LHS Data object and RHS python::object.
2057 python::object must be convertible to Data type.
2058 */
2059 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2060
2061 /**
2062 \brief
2063 Operator*
2064 Takes LHS Data object and RHS python::object.
2065 python::object must be convertible to Data type.
2066 */
2067 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2068
2069 /**
2070 \brief
2071 Operator/
2072 Takes LHS Data object and RHS python::object.
2073 python::object must be convertible to Data type.
2074 */
2075 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2076
2077 /**
2078 \brief
2079 Operator+
2080 Takes LHS python::object and RHS Data object.
2081 python::object must be convertible to Data type.
2082 */
2083 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2084
2085 /**
2086 \brief
2087 Operator-
2088 Takes LHS python::object and RHS Data object.
2089 python::object must be convertible to Data type.
2090 */
2091 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2092
2093 /**
2094 \brief
2095 Operator*
2096 Takes LHS python::object and RHS Data object.
2097 python::object must be convertible to Data type.
2098 */
2099 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2100
2101 /**
2102 \brief
2103 Operator/
2104 Takes LHS python::object and RHS Data object.
2105 python::object must be convertible to Data type.
2106 */
2107 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2108
2109
2110
2111 /**
2112 \brief
2113 Output operator
2114 */
2115 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2116
2117 /**
2118 \brief
2119 Compute a tensor product of two Data objects
2120 \param arg_0 - Input - Data object
2121 \param arg_1 - Input - Data object
2122 \param axis_offset - Input - axis offset
2123 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
2124 */
2125 ESCRIPT_DLL_API
2126 Data
2127 C_GeneralTensorProduct(Data& arg_0,
2128 Data& arg_1,
2129 int axis_offset=0,
2130 int transpose=0);
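/**
   Usage sketch (editorial addition). "a" and "b" are assumed Data objects with
   shapes compatible with the chosen axis_offset.
   \code
   // contract the last axis_offset indices of a with the leading indices of b
   escript::Data ab = escript::C_GeneralTensorProduct(a, b, 1, 0);  // axis_offset=1, no transpose
   \endcode
*/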
2131
2132 /**
2133 \brief
2134 Operator/
2135 Takes RHS Data object.
2136 */
2137 inline
2138 Data
2139 Data::truedivD(const Data& right)
2140 {
2141 return *this / right;
2142 }
2143
2144 /**
2145 \brief
2146 Operator/
2147 Takes RHS python::object.
2148 */
2149 inline
2150 Data
2151 Data::truedivO(const boost::python::object& right)
2152 {
2153 Data tmp(right, getFunctionSpace(), false);
2154 return truedivD(tmp);
2155 }
2156
2157 /**
2158 \brief
2159 Operator/
2160 Takes LHS python::object.
2161 */
2162 inline
2163 Data
2164 Data::rtruedivO(const boost::python::object& left)
2165 {
2166 Data tmp(left, getFunctionSpace(), false);
2167 return tmp.truedivD(*this);
2168 }
2169
2170 /**
2171 \brief
2172 Perform the given binary operation with this and right as operands.
2173 Right is a Data object.
2174 */
2175 template <class BinaryFunction>
2176 inline
2177 void
2178 Data::binaryOp(const Data& right,
2179 BinaryFunction operation)
2180 {
2181 //
2182 // a rank-zero object cannot be updated in place with a higher-rank RHS
2183 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2184 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2185 }
2186
2187 if (isLazy() || right.isLazy())
2188 {
2189 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2190 }
2191 //
2192 // initially make the temporary a shallow copy
2193 Data tempRight(right);
2194 FunctionSpace fsl=getFunctionSpace();
2195 FunctionSpace fsr=right.getFunctionSpace();
2196 if (fsl!=fsr) {
2197 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2198 if (intres==0)
2199 {
2200 std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
2201 msg+=fsl.toString();
2202 msg+=" ";
2203 msg+=fsr.toString();
2204 throw DataException(msg.c_str());
2205 }
2206 else if (intres==1)
2207 {
2208 // an interpolation is required so create a new Data
2209 tempRight=Data(right,fsl);
2210 }
2211 else // reverse interpolation preferred
2212 {
2213 // interpolate onto the RHS function space
2214 Data tempLeft(*this,fsr);
2215 set_m_data(tempLeft.m_data);
2216 }
2217 }
2218 operandCheck(tempRight);
2219 //
2220 // ensure this has the right type for the RHS
2221 typeMatchRight(tempRight);
2222 //
2223 // Need to cast to the concrete types so that the correct binaryOp
2224 // is called.
2225 if (isExpanded()) {
2226 //
2227 // Expanded data will be done in parallel, the right hand side can be
2228 // of any data type
2229 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2230 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2231 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2232 } else if (isTagged()) {
2233 //
2234 // Tagged data is operated on serially, the right hand side can be
2235 // either DataConstant or DataTagged
2236 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2237 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2238 if (right.isTagged()) {
2239 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2240 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2241 escript::binaryOp(*leftC,*rightC,operation);
2242 } else {
2243 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2244 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2245 escript::binaryOp(*leftC,*rightC,operation);
2246 }
2247 } else if (isConstant()) {
2248 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2249 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2250 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2251 escript::binaryOp(*leftC,*rightC,operation);
2252 }
2253 }
2254
2255 /**
2256 \brief
2257 Perform the given Data object reduction algorithm on this and return the result.
2258    The given operation combines every element of every data point, so the argument
2259    object (*this) may be a rank-n Data object while the returned result is a scalar.
2260 Calls escript::algorithm.
2261 */
2262 template <class BinaryFunction>
2263 inline
2264 double
2265 Data::algorithm(BinaryFunction operation, double initial_value) const
2266 {
2267 if (isExpanded()) {
2268 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2269 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2270 return escript::algorithm(*leftC,operation,initial_value);
2271 } else if (isTagged()) {
2272 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2273 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2274 return escript::algorithm(*leftC,operation,initial_value);
2275 } else if (isConstant()) {
2276 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2277 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2278 return escript::algorithm(*leftC,operation,initial_value);
2279 } else if (isEmpty()) {
2280 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2281 } else if (isLazy()) {
2282 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2283 } else {
2284 throw DataException("Error - Data encapsulates an unknown type.");
2285 }
2286 }
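// A hedged illustration of the reduction pattern (again normally invoked from within
// Data itself, e.g. by the sup/inf style reductions). Any callable taking and
// returning double should be acceptable as BinaryFunction:
//
//   double total = d.algorithm(std::plus<double>(), 0.0);
//   // 'total' is the sum of every component of every data point held by this
//   // Data object (presumably no cross-process reduction is implied here).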
2287
2288 /**
2289 \brief
2290 Perform the given data point reduction algorithm on data and return the result.
2291    The given operation combines the elements within each data point into a scalar,
2292    so the argument may be a rank-n Data object while the returned object is a
2293    rank-0 (scalar) Data object.
2294 Calls escript::dp_algorithm.
2295 */
2296 template <class BinaryFunction>
2297 inline
2298 Data
2299 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2300 {
2301 if (isEmpty()) {
2302 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2303 }
2304 else if (isExpanded()) {
2305 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2306 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2307 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2308 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2309 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2310 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2311 return result;
2312 }
2313 else if (isTagged()) {
2314 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2315 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2316 DataTypes::ValueType defval(1);
2317 defval[0]=0;
2318 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2319 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2320 return Data(resultT); // note: the Data object now owns the resultT pointer
2321 }
2322 else if (isConstant()) {
2323 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2324 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2325 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2326 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2327 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2328 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2329 return result;
2330 } else if (isLazy()) {
2331 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2332 } else {
2333 throw DataException("Error - Data encapsulates an unknown type.");
2334 }
2335 }
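// A hedged sketch: dp_algorithm reduces within each data point, so a rank-2 input
// yields a scalar Data object holding one value per data point, e.g.
//
//   Data pointSums = d.dp_algorithm(std::plus<double>(), 0.0);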
2336
2337 /**
2338 \brief
2339    Compute a binary tensor operation, combining two Data objects point by point.
2340    \param arg_0 - Input - first Data object operand
2341    \param arg_1 - Input - second Data object operand
2342    \param operation - Input - binary functor applied to corresponding components
2343 */
2344 template <typename BinaryFunction>
2345 inline
2346 Data
2347 C_TensorBinaryOperation(Data const &arg_0,
2348 Data const &arg_1,
2349 BinaryFunction operation)
2350 {
2351 if (arg_0.isEmpty() || arg_1.isEmpty())
2352 {
2353 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2354 }
2355 if (arg_0.isLazy() || arg_1.isLazy())
2356 {
2357 throw DataException("Error - Operations not permitted on lazy data.");
2358 }
2359 // Interpolate if necessary and find an appropriate function space
2360 Data arg_0_Z, arg_1_Z;
2361 FunctionSpace fsl=arg_0.getFunctionSpace();
2362 FunctionSpace fsr=arg_1.getFunctionSpace();
2363 if (fsl!=fsr) {
2364 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2365 if (intres==0)
2366 {
2367 std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
2368 msg+=fsl.toString();
2369 msg+=" ";
2370 msg+=fsr.toString();
2371 throw DataException(msg.c_str());
2372 }
2373 else if (intres==1)
2374 {
2375 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2376       arg_0_Z = Data(arg_0);
2377 }
2378 else // reverse interpolation preferred
2379 {
2380 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2381 arg_1_Z = Data(arg_1);
2382 }
2383 } else {
2384 arg_0_Z = Data(arg_0);
2385 arg_1_Z = Data(arg_1);
2386 }
2387 // Get rank and shape of inputs
2388 int rank0 = arg_0_Z.getDataPointRank();
2389 int rank1 = arg_1_Z.getDataPointRank();
2390 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2391 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2392 int size0 = arg_0_Z.getDataPointSize();
2393 int size1 = arg_1_Z.getDataPointSize();
2394 // Declare output Data object
2395 Data res;
2396
2397 if (shape0 == shape1) {
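    // Identical point shapes: the operation is applied component by component.
    // The cases below enumerate every (Constant | Tagged | Expanded) pairing of
    // the two (already interpolated) operands.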
2398 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2399 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2400 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2401 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2402 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2403
2404 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2405 }
2406 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2407
2408 // Prepare the DataConstant input
2409 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2410
2411 // Borrow DataTagged input from Data object
2412 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2413
2414 // Prepare a DataTagged output 2
2415 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2416 res.tag();
2417 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2418
2419 // Prepare offset into DataConstant
2420 int offset_0 = tmp_0->getPointOffset(0,0);
2421 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2422
2423 // Get the pointers to the actual data
2424 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2425 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2426
2427 // Compute a result for the default
2428 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2429 // Compute a result for each tag
2430 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2431 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2432 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2433 tmp_2->addTag(i->first);
2434 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2435 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2436
2437 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2438 }
2439
2440 }
2441 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2442 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2443 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2444 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2445 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2446
2447 int sampleNo_1,dataPointNo_1;
2448 int numSamples_1 = arg_1_Z.getNumSamples();
2449 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2450 int offset_0 = tmp_0->getPointOffset(0,0);
2451 res.requireWrite();
2452 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2453 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2454 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2455 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2456 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2457 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2458 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2459 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2460 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2461 }
2462 }
2463
2464 }
2465 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2466 // Borrow DataTagged input from Data object
2467 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2468
2469 // Prepare the DataConstant input
2470 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2471
2472 // Prepare a DataTagged output 2
2473 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2474 res.tag();
2475 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2476
2477 // Prepare offset into DataConstant
2478 int offset_1 = tmp_1->getPointOffset(0,0);
2479
2480 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2481 // Get the pointers to the actual data
2482 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2483 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2484 // Compute a result for the default
2485 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2486 // Compute a result for each tag
2487 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2488 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2489 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2490 tmp_2->addTag(i->first);
2491 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2492 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2493 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2494 }
2495
2496 }
2497 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2498 // Borrow DataTagged input from Data object
2499 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2500
2501 // Borrow DataTagged input from Data object
2502 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2503
2504 // Prepare a DataTagged output 2
2505 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2506 res.tag(); // DataTagged output
2507 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2508
2509 // Get the pointers to the actual data
2510 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2511 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2512 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2513
2514 // Compute a result for the default
2515 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2516 // Merge the tags
2517 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2518 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2519 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2520 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2521 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2522 }
2523 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2524 tmp_2->addTag(i->first);
2525 }
2526 // Compute a result for each tag
2527 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2528 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2529
2530 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2531 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2532 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2533
2534 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2535 }
2536
2537 }
2538 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2539 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2540 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2541 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2542 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2543 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2544
2545 int sampleNo_0,dataPointNo_0;
2546 int numSamples_0 = arg_0_Z.getNumSamples();
2547 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2548 res.requireWrite();
2549 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2550 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2551 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2552 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2553 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2554 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2555 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2556 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2557 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2558 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2559 }
2560 }
2561
2562 }
2563 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2564 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2565 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2566 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2567 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2568
2569 int sampleNo_0,dataPointNo_0;
2570 int numSamples_0 = arg_0_Z.getNumSamples();
2571 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2572 int offset_1 = tmp_1->getPointOffset(0,0);
2573 res.requireWrite();
2574 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2575 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2576 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2577 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2578 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2579
2580 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2581 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2582 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2583
2584
2585 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2586 }
2587 }
2588
2589 }
2590 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2591 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2592 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2593 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2594 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2595 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2596
2597 int sampleNo_0,dataPointNo_0;
2598 int numSamples_0 = arg_0_Z.getNumSamples();
2599 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2600 res.requireWrite();
2601 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2602 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2603 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2604 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2605 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2606 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2607 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2608 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2609 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2610 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2611 }
2612 }
2613
2614 }
2615 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2616 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2617 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2618 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2619 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2620 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2621
2622 int sampleNo_0,dataPointNo_0;
2623 int numSamples_0 = arg_0_Z.getNumSamples();
2624 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2625 res.requireWrite();
2626 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2627 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2628 dataPointNo_0=0;
2629 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
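        // Only the offsets at data point 0 are needed here: for expanded data the points
        // of a sample are presumably stored contiguously, so the whole sample is processed
        // in one call of length size0*numDataPointsPerSample_0 (hence the inner loop above
        // is disabled).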
2630 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2631 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2632 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2633 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2634 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2635 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2636 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2637 // }
2638 }
2639
2640 }
2641 else {
2642 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2643 }
2644
2645 } else if (0 == rank0) {
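    // arg_0 has rank 0: its single value is broadcast against every component of
    // arg_1, so the result takes shape1. Note the scalar is passed as ptr_0[0] below.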
2646 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2647 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2648 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2649 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2650 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2651 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2652 }
2653 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2654
2655 // Prepare the DataConstant input
2656 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2657
2658 // Borrow DataTagged input from Data object
2659 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2660
2661 // Prepare a DataTagged output 2
2662 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2663 res.tag();
2664 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2665
2666 // Prepare offset into DataConstant
2667 int offset_0 = tmp_0->getPointOffset(0,0);
2668 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2669
2670 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2671 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2672
2673 // Compute a result for the default
2674 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2675 // Compute a result for each tag
2676 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2677 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2678 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2679 tmp_2->addTag(i->first);
2680 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2681 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2682 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2683 }
2684
2685 }
2686 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2687
2688 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2689 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2690 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2691 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2692
2693 int sampleNo_1;
2694 int numSamples_1 = arg_1_Z.getNumSamples();
2695 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2696 int offset_0 = tmp_0->getPointOffset(0,0);
2697 const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2698 double ptr_0 = ptr_src[0];
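        // Despite the name, ptr_0 here is the scalar value of the rank-0 constant,
        // read once outside the parallel loop and passed by value to the scalar
        // overload of tensor_binary_operation below.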
2699 int size = size1*numDataPointsPerSample_1;
2700 res.requireWrite();
2701 #pragma omp parallel for private(sampleNo_1) schedule(static)
2702 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2703 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2704 int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2705 int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2706 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2707 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2708 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2709 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2710
2711 // }
2712 }
2713
2714 }
2715 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2716
2717 // Borrow DataTagged input from Data object
2718 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2719
2720 // Prepare the DataConstant input
2721 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2722
2723 // Prepare a DataTagged output 2
2724 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2725 res.tag();
2726 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2727
2728 // Prepare offset into DataConstant
2729 int offset_1 = tmp_1->getPointOffset(0,0);
2730 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2731
2732 // Get the pointers to the actual data
2733 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2734 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2735
2736
2737 // Compute a result for the default
2738 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2739 // Compute a result for each tag
2740 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2741 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2742 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2743 tmp_2->addTag(i->first);
2744 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2745 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2746
2747 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2748 }
2749
2750 }
2751 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2752
2753 // Borrow DataTagged input from Data object
2754 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2755
2756 // Borrow DataTagged input from Data object
2757 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2758
2759 // Prepare a DataTagged output 2
2760 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2761 res.tag(); // DataTagged output
2762 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2763
2764 // Get the pointers to the actual data
2765 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2766 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2767 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2768
2769 // Compute a result for the default
2770 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2771 // Merge the tags
2772 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2773 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2774 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2775 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2776 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2777 }
2778 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2779 tmp_2->addTag(i->first);
2780 }
2781 // Compute a result for each tag
2782 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2783 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2784 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2785 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2786 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2787
2788 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2789 }
2790
2791 }
2792 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2793
2794 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2795 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2796 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2797 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2798 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2799
2800 int sampleNo_0,dataPointNo_0;
2801 int numSamples_0 = arg_0_Z.getNumSamples();
2802 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2803 res.requireWrite();
2804 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2805 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2806 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2807 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2808 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2809 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2810 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2811 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2812 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2813 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2814 }
2815 }
2816
2817 }
2818 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2819 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2820 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2821 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2822 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2823
2824 int sampleNo_0,dataPointNo_0;
2825 int numSamples_0 = arg_0_Z.getNumSamples();
2826 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2827 int offset_1 = tmp_1->getPointOffset(0,0);
2828 res.requireWrite();
2829 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2830 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2831 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2832 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2833 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2834 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2835 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2836 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2837 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2838 }
2839 }
2840
2841
2842 }
2843 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2844
2845 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2846 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2847 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2848 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2849 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2850
2851 int sampleNo_0,dataPointNo_0;
2852 int numSamples_0 = arg_0_Z.getNumSamples();
2853 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2854 res.requireWrite();
2855 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2856 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2857 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2858 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2859 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2860 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2861 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2862 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2863 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2864 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2865 }
2866 }
2867
2868 }
2869 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2870
2871 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2872 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2873 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2874 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2875 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2876
2877 int sampleNo_0,dataPointNo_0;
2878 int numSamples_0 = arg_0_Z.getNumSamples();
2879 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2880 res.requireWrite();
2881 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2882 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2883 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2884 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2885 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2886 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2887 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2888 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2889 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2890 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2891 }
2892 }
2893
2894 }
2895 else {
2896 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2897 }
2898
2899 } else if (0 == rank1) {
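    // arg_1 has rank 0: the mirror image of the previous branch. The single RHS value
    // is broadcast against every component of arg_0 (passed as ptr_1[0]), and the
    // result takes shape0.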
2900 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2901 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2902 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2903 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2904 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2905 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2906 }
2907 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2908
2909 // Prepare the DataConstant input
2910 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2911
2912 // Borrow DataTagged input from Data object
2913 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2914
2915 // Prepare a DataTagged output 2
2916 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2917 res.tag();
2918 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2919
2920 // Prepare offset into DataConstant
2921 int offset_0 = tmp_0->getPointOffset(0,0);
2922 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2923
2924 //Get the pointers to the actual data
2925 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2926 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2927
2928 // Compute a result for the default
2929 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2930 // Compute a result for each tag
2931 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2932 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2933 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2934 tmp_2->addTag(i->first);
2935 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2936 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2937 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2938 }
2939 }
2940 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2941
2942 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2943 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2944 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2945 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2946
2947 int sampleNo_1,dataPointNo_1;
2948 int numSamples_1 = arg_1_Z.getNumSamples();
2949 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2950 int offset_0 = tmp_0->getPointOffset(0,0);
2951 res.requireWrite();
2952 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2953 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2954 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2955 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2956 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2957 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2958 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2959 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2960 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2961 }
2962 }
2963
2964 }
2965 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2966
2967 // Borrow DataTagged input from Data object
2968 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2969
2970 // Prepare the DataConstant input
2971 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2972
2973 // Prepare a DataTagged output 2
2974 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2975 res.tag();
2976 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2977
2978 // Prepare offset into DataConstant
2979 int offset_1 = tmp_1->getPointOffset(0,0);
2980 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2981 // Get the pointers to the actual data
2982 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2983 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2984 // Compute a result for the default
2985 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2986 // Compute a result for each tag
2987 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2988 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2989 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2990 tmp_2->addTag(i->first);
2991 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2992 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2993 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2994 }
2995
2996 }
2997 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2998
2999 // Borrow DataTagged input from Data object
3000 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3001
3002 // Borrow DataTagged input from Data object
3003 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3004
3005 // Prepare a DataTagged output 2
3006 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
3007 res.tag(); // DataTagged output
3008 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3009
3010 // Get the pointers to the actual data
3011 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3012 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
3013 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3014
3015 // Compute a result for the default
3016 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3017 // Merge the tags
3018 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3019 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3020 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
3021 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3022 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
3023 }
3024 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
3025 tmp_2->addTag(i->first);
3026 }
3027 // Compute a result for each tag
3028 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
3029 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
3030 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3031 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
3032 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3033 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3034 }
3035
3036 }
3037 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
3038
3039 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3040 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3041 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3042 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3043 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3044
3045 int sampleNo_0,dataPointNo_0;
3046 int numSamples_0 = arg_0_Z.getNumSamples();
3047 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3048 res.requireWrite();
3049 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3050 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3051 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
3052 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3053 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3054 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3055 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3056 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3057 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3058 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3059 }
3060 }
3061
3062 }
3063 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
3064 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3065 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3066 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3067 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3068
3069 int sampleNo_0;
3070 int numSamples_0 = arg_0_Z.getNumSamples();
3071 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3072 int offset_1 = tmp_1->getPointOffset(0,0);
3073 const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3074 double ptr_1 = ptr_src[0];
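        // As in the rank-0 LHS case, ptr_1 is actually the scalar value of the constant
        // RHS, hoisted out of the parallel loop and passed by value below.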
3075 int size = size0 * numDataPointsPerSample_0;
3076 res.requireWrite();
3077 #pragma omp parallel for private(sampleNo_0) schedule(static)
3078 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3079 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3080 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3081 int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3082 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3083 // const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3084 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3085 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3086 // }
3087 }
3088
3089
3090 }
3091 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3092
3093 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3094 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3095 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3096 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3097 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3098
3099 int sampleNo_0,dataPointNo_0;
3100 int numSamples_0 = arg_0_Z.getNumSamples();
3101 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3102 res.requireWrite();
3103 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3104 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3105 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3106 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3107 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3108 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3109 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3110 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3111 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3112 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3113 }
3114 }
3115
3116 }
3117 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3118
3119 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3120 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3121 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3122 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3123 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3124
3125 int sampleNo_0,dataPointNo_0;
3126 int numSamples_0 = arg_0_Z.getNumSamples();
3127 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3128 res.requireWrite();
3129 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3130 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3131 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3132 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3133 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3134 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3135 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3136 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3137 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3138 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3139 }
3140 }
3141
3142 }
3143 else {
3144 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3145 }
3146
3147 } else {
3148 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3149 }
3150
3151 return res;
3152 }
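// A hedged usage sketch (illustrative only): combining two Data objects with a
// standard functor. Any callable taking two doubles and returning a double should
// be acceptable as BinaryFunction. 'shape' and 'fs' below are placeholders.
//
//   #include <functional>
//   Data a(1.0, shape, fs, false);
//   Data b(2.0, shape, fs, true);                                  // expanded RHS
//   Data c = C_TensorBinaryOperation(a, b, std::plus<double>());   // c == 3.0 everywhere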
3153
3154 template <typename UnaryFunction>
3155 Data
3156 C_TensorUnaryOperation(Data const &arg_0,
3157 UnaryFunction operation)
3158 {
3159 if (arg_0.isEmpty()) // do this before we attempt to interpolate
3160 {
3161 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
3162 }
3163 if (arg_0.isLazy())
3164 {
3165 throw DataException("Error - Operations not permitted on lazy data.");
3166 }
3167 // Interpolate if necessary and find an appropriate function space
3168 Data arg_0_Z = Data(arg_0);
3169
3170 // Get rank and shape of inputs
3171 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3172 int size0 = arg_0_Z.getDataPointSize();
3173
3174 // Declare output Data object
3175 Data res;
3176
3177 if (arg_0_Z.isConstant()) {
3178 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3179 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3180 double *ptr_2 = &(res.getDataAtOffsetRW(0));
3181 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3182 }
3183 else if (arg_0_Z.isTagged()) {
3184
3185 // Borrow DataTagged input from Data object
3186 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3187
3188 // Prepare a DataTagged output 2
3189 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3190 res.tag();
3191 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3192
3193 // Get the pointers to the actual data
3194 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3195 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3196 // Compute a result for the default
3197 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3198 // Compute a result for each tag
3199 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3200 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3201 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3202 tmp_2->addTag(i->first);
3203 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3204 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3205 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3206 }
3207
3208 }
3209 else if (arg_0_Z.isExpanded()) {
3210
3211 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3212 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3213 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3214
3215 int sampleNo_0,dataPointNo_0;
3216 int numSamples_0 = arg_0_Z.getNumSamples();
3217 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3218 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3219 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3220 dataPointNo_0=0;
3221 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3222 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3223 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3224 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3225 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3226 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3227 // }
3228 }
3229 }
3230 else {
3231 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3232 }
3233
3234 return res;
3235 }
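// A hedged sketch: applying a pointwise function to every component of a Data
// object. The cast to Data::UnaryDFunPtr selects the double(double) overload of
// the maths function (assuming <cmath> is available).
//
//   Data s = C_TensorUnaryOperation(d, (Data::UnaryDFunPtr)::sin);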
3236
3237 }
3238 #endif
