/[escript]/branches/complex/escriptcore/src/Data.h
ViewVC logotype

Contents of /branches/complex/escriptcore/src/Data.h

Parent Directory Parent Directory | Revision Log Revision Log


Revision 5881 - (show annotations)
Thu Jan 21 04:21:25 2016 UTC (3 years, 2 months ago) by jfenwick
File MIME type: text/plain
File size: 104649 byte(s)
TensorOps functions are now defined in terms of template parameters rather
than using "double" directly.
This uses the typeinfo and c++11 feature decltype.
This commit does not necessarily fix the functions that TensorOps
calls, but it passes unit tests for now.

Also replaced all <double (*)(double) > instantiations with function objects.

This commit does not force c++11 compilation in options files so
if your options are not configured properly you may have compile issues.


1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2016 by The University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development 2012-2013 by School of Earth Sciences
13 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 *
15 *****************************************************************************/
16
17
18 /** \file Data.h */
19
20 #ifndef DATA_H
21 #define DATA_H
22 #include "system_dep.h"
23
24 #include "DataTypes.h"
25 #include "DataAbstract.h"
26 #include "DataAlgorithm.h"
27 #include "FunctionSpace.h"
28 #include "BinaryOp.h"
29 #include "UnaryOp.h"
30 #include "DataException.h"
31
32 #ifdef _OPENMP
33 #include <omp.h>
34 #endif
35
36 #include "esysUtils/Esys_MPI.h"
37 #include <string>
38 #include <algorithm>
39 #include <sstream>
40
41 #include <boost/shared_ptr.hpp>
42 #include <boost/python/object.hpp>
43 #include <boost/python/tuple.hpp>
44 #include <boost/math/special_functions/bessel.hpp>
45
46 namespace escript {
47
48 //
49 // Forward declaration for various implementations of Data.
50 class DataConstant;
51 class DataTagged;
52 class DataExpanded;
53 class DataLazy;
54
55 /**
56 \brief
57 Data represents a collection of datapoints.
58
59 Description:
60 Internally, the datapoints are actually stored by a DataAbstract object.
61 The specific instance of DataAbstract used may vary over the lifetime
62 of the Data object.
63 Some methods on this class return references (eg getShape()).
64 These references should not be used after an operation which changes the underlying DataAbstract object.
65 Doing so will lead to invalid memory access.
66 This should not affect any methods exposed via boost::python.
67 */
68 class Data {
69
70 public:
71
72 // These typedefs allow function names to be cast to pointers
73 // to functions of the appropriate type when calling unaryOp etc.
74 typedef double (*UnaryDFunPtr)(double);
75 typedef double (*BinaryDFunPtr)(double,double);
76
77
78 /**
79 Constructors.
80 */
81
82 /**
83 \brief
84 Default constructor.
85 Creates a DataEmpty object.
86 */
87 ESCRIPT_DLL_API
88 Data();
89
90 /**
91 \brief
92 Copy constructor.
93 WARNING: Only performs a shallow copy.
94 */
95 ESCRIPT_DLL_API
96 Data(const Data& inData);
97
98 /**
99 \brief
100 Constructor from another Data object. If "what" is different from the
101 function space of inData the inData are tried to be interpolated to what,
102 otherwise a shallow copy of inData is returned.
103 */
104 ESCRIPT_DLL_API
105 Data(const Data& inData,
106 const FunctionSpace& what);
107
108 /**
109 \brief Copy Data from an existing vector
110 */
111
112 ESCRIPT_DLL_API
113 Data(const DataTypes::FloatVectorType& value,
114 const DataTypes::ShapeType& shape,
115 const FunctionSpace& what=FunctionSpace(),
116 bool expanded=false);
117
118 /**
119 \brief
120 Constructor which creates a Data with points having the specified shape.
121
122 \param value - Input - Single value applied to all Data.
123 \param dataPointShape - Input - The shape of each data point.
124 \param what - Input - A description of what this data represents.
125 \param expanded - Input - Flag, if true fill the entire container with
126 the given value. Otherwise a more efficient storage
127 mechanism will be used.
128 */
129 ESCRIPT_DLL_API
130 Data(double value,
131 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
132 const FunctionSpace& what=FunctionSpace(),
133 bool expanded=false);
134
135 /**
136 \brief
137 Constructor which performs a deep copy of a region from another Data object.
138
139 \param inData - Input - Input Data object.
140 \param region - Input - Region to copy.
141 */
142 ESCRIPT_DLL_API
143 Data(const Data& inData,
144 const DataTypes::RegionType& region);
145
146 /**
147 \brief
148 Constructor which copies data from any object that can be treated like a python array/sequence.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which copies data from a wrapped array.
164
165 \param w - Input - Input data.
166 \param what - Input - A description of what this data represents.
167 \param expanded - Input - Flag, if true fill the entire container with
168 the value. Otherwise a more efficient storage
169 mechanism will be used.
170 */
171 ESCRIPT_DLL_API
172 Data(const WrappedArray& w, const FunctionSpace& what,
173 bool expanded=false);
174
175
176 /**
177 \brief
178 Constructor which creates a DataConstant.
179 Copies data from any object that can be treated like a python array/sequence.
180 All other parameters are copied from other.
181
182 \param value - Input - Input data.
183 \param other - Input - contains all other parameters.
184 */
185 ESCRIPT_DLL_API
186 Data(const boost::python::object& value,
187 const Data& other);
188
189 /**
190 \brief
191 Constructor which creates a DataConstant of "shape" with constant value.
192 */
193 ESCRIPT_DLL_API
194 Data(double value,
195 const boost::python::tuple& shape=boost::python::make_tuple(),
196 const FunctionSpace& what=FunctionSpace(),
197 bool expanded=false);
198
199 /**
200 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
201 Once you have passed the pointer, do not delete it.
202 */
203 ESCRIPT_DLL_API
204 explicit Data(DataAbstract* underlyingdata);
205
206 /**
207 \brief Create a Data based on the supplied DataAbstract
208 */
209 ESCRIPT_DLL_API
210 explicit Data(DataAbstract_ptr underlyingdata);
211
212 /**
213 \brief
214 Destructor
215 */
216 ESCRIPT_DLL_API
217 ~Data();
218
219 /**
220 \brief Make this object a deep copy of "other".
221 */
222 ESCRIPT_DLL_API
223 void
224 copy(const Data& other);
225
226 /**
227 \brief Return a pointer to a deep copy of this object.
228 */
229 ESCRIPT_DLL_API
230 Data
231 copySelf() const;
232
233
234 /**
235 \brief produce a delayed evaluation version of this Data.
236 */
237 ESCRIPT_DLL_API
238 Data
239 delay();
240
241 /**
242 \brief convert the current data into lazy data.
243 */
244 ESCRIPT_DLL_API
245 void
246 delaySelf();
247
248
249 /**
250 Member access methods.
251 */
252
253 /**
254 \brief
255 switches on update protection
256
257 */
258 ESCRIPT_DLL_API
259 void
260 setProtection();
261
262 /**
263 \brief
264 Returns true, if the data object is protected against update
265
266 */
267 ESCRIPT_DLL_API
268 bool
269 isProtected() const;
270
271
272 /**
273 \brief
274 Return the value of a data point as a python tuple.
275 */
276 ESCRIPT_DLL_API
277 const boost::python::object
278 getValueOfDataPointAsTuple(int dataPointNo);
279
280 /**
281 \brief
282 sets the values of a data-point from a python object on this process
283 */
284 ESCRIPT_DLL_API
285 void
286 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
287
288 /**
289 \brief
290 sets the values of a data-point from a array-like object on this process
291 */
292 ESCRIPT_DLL_API
293 void
294 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
295
296 /**
297 \brief
298 sets the values of a data-point on this process
299 */
300 ESCRIPT_DLL_API
301 void
302 setValueOfDataPoint(int dataPointNo, const double);
303
304 /**
305 \brief Return a data point across all processors as a python tuple.
306 */
307 ESCRIPT_DLL_API
308 const boost::python::object
309 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
310
311
312 /**
313 \brief Set the value of a global data point
314 */
315 ESCRIPT_DLL_API
316 void
317 setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
318
319 /**
320 \brief
321 Return the tag number associated with the given data-point.
322
323 */
324 ESCRIPT_DLL_API
325 int
326 getTagNumber(int dpno);
327
328
329 /**
330 \brief
331 Write the data as a string. For large amounts of data, a summary is printed.
332 */
333 ESCRIPT_DLL_API
334 std::string
335 toString() const;
336
337 /**
338 \brief
339 Whatever the current Data type make this into a DataExpanded.
340 */
341 ESCRIPT_DLL_API
342 void
343 expand();
344
345 /**
346 \brief
347 If possible convert this Data to DataTagged. This will only allow
348 Constant data to be converted to tagged. An attempt to convert
349 Expanded data to tagged will throw an exception.
350 */
351 ESCRIPT_DLL_API
352 void
353 tag();
354
355 /**
356 \brief If this data is lazy, then convert it to ready data.
357 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
358 */
359 ESCRIPT_DLL_API
360 void
361 resolve();
362
363 /**
364 \brief returns true if data contains NaN.
365 \warning This is dependent on the ability to reliably detect NaNs on your compiler.
366 See the nancheck function in LocalOps for details.
367 */
368 ESCRIPT_DLL_API
369 bool
370 hasNaN();
371
372 /**
373 \brief replaces all NaN values with value
374 */
375 ESCRIPT_DLL_API
376 void
377 replaceNaN(double value);
378
379 /**
380 \brief Ensures data is ready for write access.
381 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
382 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
383 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
384 Doing so might introduce additional sharing.
385 */
386 ESCRIPT_DLL_API
387 void
388 requireWrite();
389
390 /**
391 \brief
392 Return true if this Data is expanded.
393 \note To determine if a sample will contain separate values for each datapoint. Use actsExpanded instead.
394 */
395 ESCRIPT_DLL_API
396 bool
397 isExpanded() const;
398
399 /**
400 \brief
401 Return true if this Data is expanded or resolves to expanded.
402 That is, if it has a separate value for each datapoint in the sample.
403 */
404 ESCRIPT_DLL_API
405 bool
406 actsExpanded() const;
407
408
409 /**
410 \brief
411 Return true if this Data is tagged.
412 */
413 ESCRIPT_DLL_API
414 bool
415 isTagged() const;
416
417 /**
418 \brief
419 Return true if this Data is constant.
420 */
421 ESCRIPT_DLL_API
422 bool
423 isConstant() const;
424
425 /**
426 \brief Return true if this Data is lazy.
427 */
428 ESCRIPT_DLL_API
429 bool
430 isLazy() const;
431
432 /**
433 \brief Return true if this data is ready.
434 */
435 ESCRIPT_DLL_API
436 bool
437 isReady() const;
438
439 /**
440 \brief
441 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
442 contains datapoints.
443 */
444 ESCRIPT_DLL_API
445 bool
446 isEmpty() const;
447
448 /**
449 \brief
450 True if components of this data are stored as complex
451 */
452 ESCRIPT_DLL_API
453 bool
454 isComplex() const;
455
456 /**
457 \brief
458 Return the function space.
459 */
460 ESCRIPT_DLL_API
461 inline
462 const FunctionSpace&
463 getFunctionSpace() const
464 {
465 return m_data->getFunctionSpace();
466 }
467
468 /**
469 \brief
470 Return the domain.
471 */
472 ESCRIPT_DLL_API
473 inline
474 // const AbstractDomain&
475 const_Domain_ptr
476 getDomain() const
477 {
478 return getFunctionSpace().getDomain();
479 }
480
481
482 /**
483 \brief
484 Return the domain.
485 TODO: For internal use only. This should be removed.
486 */
487 ESCRIPT_DLL_API
488 inline
489 // const AbstractDomain&
490 Domain_ptr
491 getDomainPython() const
492 {
493 return getFunctionSpace().getDomainPython();
494 }
495
496 /**
497 \brief
498 Return the rank of the point data.
499 */
500 ESCRIPT_DLL_API
501 inline
502 unsigned int
503 getDataPointRank() const
504 {
505 return m_data->getRank();
506 }
507
508 /**
509 \brief
510 Return the number of data points
511 */
512 ESCRIPT_DLL_API
513 inline
514 int
515 getNumDataPoints() const
516 {
517 return getNumSamples() * getNumDataPointsPerSample();
518 }
519 /**
520 \brief
521 Return the number of samples.
522 */
523 ESCRIPT_DLL_API
524 inline
525 int
526 getNumSamples() const
527 {
528 return m_data->getNumSamples();
529 }
530
531 /**
532 \brief
533 Return the number of data points per sample.
534 */
535 ESCRIPT_DLL_API
536 inline
537 int
538 getNumDataPointsPerSample() const
539 {
540 return m_data->getNumDPPSample();
541 }
542
543 /**
544 \brief
545 Returns true if the number of data points per sample and the number of
546 samples match the respective argument. DataEmpty always returns true.
547 */
548 ESCRIPT_DLL_API
549 inline
550 bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
551 {
552 return (isEmpty() ||
553 (numDataPointsPerSample==getNumDataPointsPerSample() && numSamples==getNumSamples()));
554 }
555
556 /**
557 \brief
558 Returns true if the shape matches the vector (dimensions[0],...,
559 dimensions[rank-1]). DataEmpty always returns true.
560 */
561 inline
562 bool isDataPointShapeEqual(int rank, const int* dimensions) const
563 {
564 if (isEmpty())
565 return true;
566 const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
567 return (getDataPointShape()==givenShape);
568 }
569
570 /**
571 \brief
572 Return the number of values in the shape for this object.
573 */
574 ESCRIPT_DLL_API
575 int
576 getNoValues() const
577 {
578 return m_data->getNoValues();
579 }
580
581
582 /**
583 \brief
584 dumps the object into a netCDF file
585 */
586 ESCRIPT_DLL_API
587 void
588 dump(const std::string fileName) const;
589
590 /**
591 \brief returns the values of the object as a list of tuples (one for each datapoint).
592
593 \param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
594 If false, the result is a list of scalars [1, 2, ...]
595 */
596 ESCRIPT_DLL_API
597 const boost::python::object
598 toListOfTuples(bool scalarastuple=true);
599
600
601 /**
602 \brief
603 Return the sample data for the given sample no.
604 Please do not use this unless you NEED to access samples individually
605 \param sampleNo - Input - the given sample no.
606 \return pointer to the sample data.
607 */
608 ESCRIPT_DLL_API
609 inline
610 const DataAbstract::ValueType::value_type*
611 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const;
612
613
614 /**
615 \brief
616 Return the sample data for the given sample no.
617 Please do not use this unless you NEED to access samples individually
618 \param sampleNo - Input - the given sample no.
619 \return pointer to the sample data.
620 */
621 ESCRIPT_DLL_API
622 inline
623 DataAbstract::ValueType::value_type*
624 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
625
626
627 /**
628 \brief
629 Return a pointer to the beginning of the underlying data
630 \warning please avoid using this method since it by-passes possible lazy improvements. May be removed without notice.
631 \return pointer to the data.
632 */
633 ESCRIPT_DLL_API
634 inline
635 const DataAbstract::ValueType::value_type*
636 getDataRO() const;
637
638 /**
639 \brief
640 Return the sample data for the given tag. If an attempt is made to
641 access data that isn't tagged an exception will be thrown.
642 \param tag - Input - the tag key.
643 */
644 ESCRIPT_DLL_API
645 inline
646 DataAbstract::ValueType::value_type*
647 getSampleDataByTag(int tag)
648 {
649 return m_data->getSampleDataByTag(tag);
650 }
651
652 /**
653 \brief
654 Return a reference into the DataVector which points to the specified data point.
655 \param sampleNo - Input -
656 \param dataPointNo - Input -
657 */
658 ESCRIPT_DLL_API
659 DataTypes::FloatVectorType::const_reference
660 getDataPointRO(int sampleNo, int dataPointNo);
661
662 /**
663 \brief
664 Return a reference into the DataVector which points to the specified data point.
665 \param sampleNo - Input -
666 \param dataPointNo - Input -
667 */
668 ESCRIPT_DLL_API
669 DataTypes::FloatVectorType::reference
670 getDataPointRW(int sampleNo, int dataPointNo);
671
672
673
674 /**
675 \brief
676 Return the offset for the given sample and point within the sample
677 */
678 ESCRIPT_DLL_API
679 inline
680 DataTypes::FloatVectorType::size_type
681 getDataOffset(int sampleNo,
682 int dataPointNo)
683 {
684 return m_data->getPointOffset(sampleNo,dataPointNo);
685 }
686
687 /**
688 \brief
689 Return a reference to the data point shape.
690 */
691 ESCRIPT_DLL_API
692 inline
693 const DataTypes::ShapeType&
694 getDataPointShape() const
695 {
696 return m_data->getShape();
697 }
698
699 /**
700 \brief
701 Return the data point shape as a tuple of integers.
702 */
703 ESCRIPT_DLL_API
704 const boost::python::tuple
705 getShapeTuple() const;
706
707 /**
708 \brief
709 Return the size of the data point. It is the product of the
710 data point shape dimensions.
711 */
712 ESCRIPT_DLL_API
713 int
714 getDataPointSize() const;
715
716 /**
717 \brief
718 Return the number of doubles stored for this Data.
719 */
720 ESCRIPT_DLL_API
721 DataTypes::FloatVectorType::size_type
722 getLength() const;
723
724 /**
725 \brief Return true if this object contains no samples.
726 This is not the same as isEmpty()
727 */
728 ESCRIPT_DLL_API
729 bool
730 hasNoSamples() const
731 {
732 return getLength()==0;
733 }
734
735 /**
736 \brief
737 Assign the given value to the tag associated with name. Implicitly converts this
738 object to type DataTagged. Throws an exception if this object
739 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
740 \param name - Input - name of tag.
741 \param value - Input - Value to associate with given key.
742 */
743 ESCRIPT_DLL_API
744 void
745 setTaggedValueByName(std::string name,
746 const boost::python::object& value);
747
748 /**
749 \brief
750 Assign the given value to the tag. Implicitly converts this
751 object to type DataTagged if it is constant.
752
753 \param tagKey - Input - Integer key.
754 \param value - Input - Value to associate with given key.
755 ==>*
756 */
757 ESCRIPT_DLL_API
758 void
759 setTaggedValue(int tagKey,
760 const boost::python::object& value);
761
762 /**
763 \brief
764 Assign the given value to the tag. Implicitly converts this
765 object to type DataTagged if it is constant.
766
767 \param tagKey - Input - Integer key.
768 \param pointshape - Input - The shape of the value parameter
769 \param value - Input - Value to associate with given key.
770 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
771 */
772 ESCRIPT_DLL_API
773 void
774 setTaggedValueFromCPP(int tagKey,
775 const DataTypes::ShapeType& pointshape,
776 const DataTypes::FloatVectorType& value,
777 int dataOffset=0);
778
779
780
781 /**
782 \brief
783 Copy other Data object into this Data object where mask is positive.
784 */
785 ESCRIPT_DLL_API
786 void
787 copyWithMask(const Data& other,
788 const Data& mask);
789
790 /**
791 Data object operation methods and operators.
792 */
793
794 /**
795 \brief
796 set all values to zero
797 *
798 */
799 ESCRIPT_DLL_API
800 void
801 setToZero();
802
803 /**
804 \brief
805 Interpolates this onto the given functionspace and returns
806 the result as a Data object.
807 *
808 */
809 ESCRIPT_DLL_API
810 Data
811 interpolate(const FunctionSpace& functionspace) const;
812
813 ESCRIPT_DLL_API
814 Data
815 interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
816 double undef, Data& B, double Bmin, double Bstep, Data& C,
817 double Cmin, double Cstep, bool check_boundaries);
818
819 ESCRIPT_DLL_API
820 Data
821 interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
822 double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
823
824 ESCRIPT_DLL_API
825 Data
826 interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
827 double undef,bool check_boundaries);
828
829
830 ESCRIPT_DLL_API
831 Data
832 interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
833 Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
834
835
836 ESCRIPT_DLL_API
837 Data
838 interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
839 Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
840
841 ESCRIPT_DLL_API
842 Data
843 interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
844 double undef,bool check_boundaries);
845
846 ESCRIPT_DLL_API
847 Data
848 nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
849
850 ESCRIPT_DLL_API
851 Data
852 nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
853
854 /**
855 \brief
856 Calculates the gradient of the data at the data points of functionspace.
857 If functionspace is not present the function space of Function(getDomain()) is used.
858 *
859 */
860 ESCRIPT_DLL_API
861 Data
862 gradOn(const FunctionSpace& functionspace) const;
863
864 ESCRIPT_DLL_API
865 Data
866 grad() const;
867
868 /**
869 \brief
870 Calculate the integral over the function space domain as a python tuple.
871 */
872 ESCRIPT_DLL_API
873 boost::python::object
874 integrateToTuple_const() const;
875
876
877 /**
878 \brief
879 Calculate the integral over the function space domain as a python tuple.
880 */
881 ESCRIPT_DLL_API
882 boost::python::object
883 integrateToTuple();
884
885
886
887 /**
888 \brief
889 Returns 1./ Data object
890 *
891 */
892 ESCRIPT_DLL_API
893 Data
894 oneOver() const;
895 /**
896 \brief
897 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
898 *
899 */
900 ESCRIPT_DLL_API
901 Data
902 wherePositive() const;
903
904 /**
905 \brief
906 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
907 *
908 */
909 ESCRIPT_DLL_API
910 Data
911 whereNegative() const;
912
913 /**
914 \brief
915 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
916 *
917 */
918 ESCRIPT_DLL_API
919 Data
920 whereNonNegative() const;
921
922 /**
923 \brief
924 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
925 *
926 */
927 ESCRIPT_DLL_API
928 Data
929 whereNonPositive() const;
930
931 /**
932 \brief
933 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
934 *
935 */
936 ESCRIPT_DLL_API
937 Data
938 whereZero(double tol=0.0) const;
939
940 /**
941 \brief
942 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
943 *
944 */
945 ESCRIPT_DLL_API
946 Data
947 whereNonZero(double tol=0.0) const;
948
949 /**
950 \brief
951 Return the maximum absolute value of this Data object.
952
953 The method is not const because lazy data needs to be expanded before Lsup can be computed.
954 The _const form can be used when the Data object is const, however this will only work for
955 Data which is not Lazy.
956
957 For Data which contain no samples (or tagged Data for which no tags in use have a value)
958 zero is returned.
959 */
960 ESCRIPT_DLL_API
961 double
962 Lsup();
963
964 ESCRIPT_DLL_API
965 double
966 Lsup_const() const;
967
968
969 /**
970 \brief
971 Return the maximum value of this Data object.
972
973 The method is not const because lazy data needs to be expanded before sup can be computed.
974 The _const form can be used when the Data object is const, however this will only work for
975 Data which is not Lazy.
976
977 For Data which contain no samples (or tagged Data for which no tags in use have a value)
978 a large negative value is returned.
979 */
980 ESCRIPT_DLL_API
981 double
982 sup();
983
984 ESCRIPT_DLL_API
985 double
986 sup_const() const;
987
988
989 /**
990 \brief
991 Return the minimum value of this Data object.
992
993 The method is not const because lazy data needs to be expanded before inf can be computed.
994 The _const form can be used when the Data object is const, however this will only work for
995 Data which is not Lazy.
996
997 For Data which contain no samples (or tagged Data for which no tags in use have a value)
998 a large positive value is returned.
999 */
1000 ESCRIPT_DLL_API
1001 double
1002 inf();
1003
1004 ESCRIPT_DLL_API
1005 double
1006 inf_const() const;
1007
1008
1009
1010 /**
1011 \brief
1012 Return the absolute value of each data point of this Data object.
1013 *
1014 */
1015 ESCRIPT_DLL_API
1016 Data
1017 abs() const;
1018
1019 /**
1020 \brief
1021 Return the maximum value of each data point of this Data object.
1022 *
1023 */
1024 ESCRIPT_DLL_API
1025 Data
1026 maxval() const;
1027
1028 /**
1029 \brief
1030 Return the minimum value of each data point of this Data object.
1031 *
1032 */
1033 ESCRIPT_DLL_API
1034 Data
1035 minval() const;
1036
1037 /**
1038 \brief
1039 Return the (sample number, data-point number) of the data point with
1040 the minimum component value in this Data object.
1041 \note If you are working in python, please consider using Locator
1042 instead of manually manipulating process and point IDs.
1043 */
1044 ESCRIPT_DLL_API
1045 const boost::python::tuple
1046 minGlobalDataPoint() const;
1047
1048 /**
1049 \brief
1050 Return the (sample number, data-point number) of the data point with
1051 the minimum component value in this Data object.
1052 \note If you are working in python, please consider using Locator
1053 instead of manually manipulating process and point IDs.
1054 */
1055 ESCRIPT_DLL_API
1056 const boost::python::tuple
1057 maxGlobalDataPoint() const;
1058
1059
1060
1061 /**
1062 \brief
1063 Return the sign of each data point of this Data object.
1064 -1 for negative values, zero for zero values, 1 for positive values.
1065 *
1066 */
1067 ESCRIPT_DLL_API
1068 Data
1069 sign() const;
1070
1071 /**
1072 \brief
1073 Return the symmetric part of a matrix which is half the matrix plus its transpose.
1074 *
1075 */
1076 ESCRIPT_DLL_API
1077 Data
1078 symmetric() const;
1079
1080 /**
1081 \brief
1082 Return the nonsymmetric part of a matrix which is half the matrix minus its transpose.
1083 *
1084 */
1085 ESCRIPT_DLL_API
1086 Data
1087 nonsymmetric() const;
1088
1089 /**
1090 \brief
1091 Return the trace of a matrix
1092 *
1093 */
1094 ESCRIPT_DLL_API
1095 Data
1096 trace(int axis_offset) const;
1097
1098 /**
1099 \brief
1100 Transpose each data point of this Data object around the given axis.
1101 *
1102 */
1103 ESCRIPT_DLL_API
1104 Data
1105 transpose(int axis_offset) const;
1106
1107 /**
1108 \brief
1109 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing values.
1110 Currently this function is restricted to rank 2, square shape, and dimension 3.
1111 *
1112 */
1113 ESCRIPT_DLL_API
1114 Data
1115 eigenvalues() const;
1116
1117 /**
1118 \brief
1119 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1120 the eigenvalues are ordered in increasing size where eigenvalues with relative difference less than
1121 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1122 first non-zero entry is positive.
1123 Currently this function is restricted to rank 2, square shape, and dimension 3
1124 *
1125 */
1126 ESCRIPT_DLL_API
1127 const boost::python::tuple
1128 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1129
1130 /**
1131 \brief
1132 swaps the components axis0 and axis1
1133 *
1134 */
1135 ESCRIPT_DLL_API
1136 Data
1137 swapaxes(const int axis0, const int axis1) const;
1138
1139 /**
1140 \brief
1141 Return the error function erf of each data point of this Data object.
1142 *
1143 */
1144 ESCRIPT_DLL_API
1145 Data
1146 erf() const;
1147
1148
1149 /**
1150 \brief
1151 For complex values return the conjugate values.
1152 For non-complex data return a copy
1153 */
1154 ESCRIPT_DLL_API
1155 Data
1156 conjugate() const;
1157
1158 /**
1159 \brief
1160 Return the sin of each data point of this Data object.
1161 *
1162 */
1163 ESCRIPT_DLL_API
1164 Data
1165 sin() const;
1166
1167 /**
1168 \brief
1169 Return the cos of each data point of this Data object.
1170 *
1171 */
1172 ESCRIPT_DLL_API
1173 Data
1174 cos() const;
1175
1176 /**
1177 \brief
1178 Bessel worker function.
1179 *
1180 */
1181 ESCRIPT_DLL_API
1182 Data
1183 bessel(int order, double (*besselfunc) (int,double) );
1184
1185
1186 /**
1187 \brief
1188 Return the Bessel function of the first kind for each data point of this Data object.
1189 *
1190 */
1191 ESCRIPT_DLL_API
1192 Data
1193 besselFirstKind(int order);
1194
1195 /**
1196 \brief
1197 Return the Bessel function of the second kind for each data point of this Data object.
1198 *
1199 */
1200 ESCRIPT_DLL_API
1201 Data
1202 besselSecondKind(int order);
1203
1204
1205 /**
1206 \brief
1207 Return the tan of each data point of this Data object.
1208 *
1209 */
1210 ESCRIPT_DLL_API
1211 Data
1212 tan() const;
1213
1214 /**
1215 \brief
1216 Return the asin of each data point of this Data object.
1217 *
1218 */
1219 ESCRIPT_DLL_API
1220 Data
1221 asin() const;
1222
1223 /**
1224 \brief
1225 Return the acos of each data point of this Data object.
1226 *
1227 */
1228 ESCRIPT_DLL_API
1229 Data
1230 acos() const;
1231
1232 /**
1233 \brief
1234 Return the atan of each data point of this Data object.
1235 *
1236 */
1237 ESCRIPT_DLL_API
1238 Data
1239 atan() const;
1240
1241 /**
1242 \brief
1243 Return the sinh of each data point of this Data object.
1244 *
1245 */
1246 ESCRIPT_DLL_API
1247 Data
1248 sinh() const;
1249
1250 /**
1251 \brief
1252 Return the cosh of each data point of this Data object.
1253 *
1254 */
1255 ESCRIPT_DLL_API
1256 Data
1257 cosh() const;
1258
1259 /**
1260 \brief
1261 Return the tanh of each data point of this Data object.
1262 *
1263 */
1264 ESCRIPT_DLL_API
1265 Data
1266 tanh() const;
1267
1268 /**
1269 \brief
1270 Return the asinh of each data point of this Data object.
1271 *
1272 */
1273 ESCRIPT_DLL_API
1274 Data
1275 asinh() const;
1276
1277 /**
1278 \brief
1279 Return the acosh of each data point of this Data object.
1280 *
1281 */
1282 ESCRIPT_DLL_API
1283 Data
1284 acosh() const;
1285
1286 /**
1287 \brief
1288 Return the atanh of each data point of this Data object.
1289 *
1290 */
1291 ESCRIPT_DLL_API
1292 Data
1293 atanh() const;
1294
1295 /**
1296 \brief
1297 Return the log to base 10 of each data point of this Data object.
1298 *
1299 */
1300 ESCRIPT_DLL_API
1301 Data
1302 log10() const;
1303
1304 /**
1305 \brief
1306 Return the natural log of each data point of this Data object.
1307 *
1308 */
1309 ESCRIPT_DLL_API
1310 Data
1311 log() const;
1312
1313 /**
1314 \brief
1315 Return the exponential function of each data point of this Data object.
1316 *
1317 */
1318 ESCRIPT_DLL_API
1319 Data
1320 exp() const;
1321
1322 /**
1323 \brief
1324 Return the square root of each data point of this Data object.
1325 *
1326 */
1327 ESCRIPT_DLL_API
1328 Data
1329 sqrt() const;
1330
1331 /**
1332 \brief
1333 Return the negation of each data point of this Data object.
1334 *
1335 */
1336 ESCRIPT_DLL_API
1337 Data
1338 neg() const;
1339
1340 /**
1341 \brief
1342 Return the identity of each data point of this Data object.
1343 Simply returns this object unmodified.
1344 *
1345 */
1346 ESCRIPT_DLL_API
1347 Data
1348 pos() const;
1349
1350 /**
1351 \brief
1352 Return the given power of each data point of this Data object.
1353
1354 \param right Input - the power to raise the object to.
1355 *
1356 */
1357 ESCRIPT_DLL_API
1358 Data
1359 powD(const Data& right) const;
1360
1361 /**
1362 \brief
1363 Return each data point of this Data object raised to the given power (a boost python object).
1364
1365 \param right Input - the power to raise the object to.
1366 *
1367 */
1368 ESCRIPT_DLL_API
1369 Data
1370 powO(const boost::python::object& right) const;
1371
1372 /**
1373 \brief
1374 Return each value of the given bases (a boost python object) raised to the power of the corresponding data point of this Data object.
1375
1376 \param left Input - the bases
1377 *
1378 */
1379
1380 ESCRIPT_DLL_API
1381 Data
1382 rpowO(const boost::python::object& left) const;
1383
1384 /**
1385 \brief
1386 Overloaded operator +=
1387 \param right - Input - The right hand side.
1388 *
1389 */
1390 ESCRIPT_DLL_API
1391 Data& operator+=(const Data& right);
1392 ESCRIPT_DLL_API
1393 Data& operator+=(const boost::python::object& right);
1394
1395 ESCRIPT_DLL_API
1396 Data& operator=(const Data& other);
1397
1398 /**
1399 \brief
1400 Overloaded operator -=
1401 \param right - Input - The right hand side.
1402 *
1403 */
1404 ESCRIPT_DLL_API
1405 Data& operator-=(const Data& right);
1406 ESCRIPT_DLL_API
1407 Data& operator-=(const boost::python::object& right);
1408
1409 /**
1410 \brief
1411 Overloaded operator *=
1412 \param right - Input - The right hand side.
1413 *
1414 */
1415 ESCRIPT_DLL_API
1416 Data& operator*=(const Data& right);
1417 ESCRIPT_DLL_API
1418 Data& operator*=(const boost::python::object& right);
1419
1420 /**
1421 \brief
1422 Overloaded operator /=
1423 \param right - Input - The right hand side.
1424 *
1425 */
1426 ESCRIPT_DLL_API
1427 Data& operator/=(const Data& right);
1428 ESCRIPT_DLL_API
1429 Data& operator/=(const boost::python::object& right);
1430
1431 /**
1432 \brief
1433 Newer style division operator for python
1434 */
1435 ESCRIPT_DLL_API
1436 Data truedivD(const Data& right);
1437
1438 /**
1439 \brief
1440 Newer style division operator for python
1441 */
1442 ESCRIPT_DLL_API
1443 Data truedivO(const boost::python::object& right);
1444
1445 /**
1446 \brief
1447 Newer style division operator for python
1448 */
1449 ESCRIPT_DLL_API
1450 Data rtruedivO(const boost::python::object& left);
1451
1452 /**
1453 \brief
1454 wrapper for python add operation
1455 */
1456 ESCRIPT_DLL_API
1457 boost::python::object __add__(const boost::python::object& right);
1458
1459
1460 /**
1461 \brief
1462 wrapper for python subtract operation
1463 */
1464 ESCRIPT_DLL_API
1465 boost::python::object __sub__(const boost::python::object& right);
1466
1467 /**
1468 \brief
1469 wrapper for python reverse subtract operation
1470 */
1471 ESCRIPT_DLL_API
1472 boost::python::object __rsub__(const boost::python::object& right);
1473
1474 /**
1475 \brief
1476 wrapper for python multiply operation
1477 */
1478 ESCRIPT_DLL_API
1479 boost::python::object __mul__(const boost::python::object& right);
1480
1481 /**
1482 \brief
1483 wrapper for python divide operation
1484 */
1485 ESCRIPT_DLL_API
1486 boost::python::object __div__(const boost::python::object& right);
1487
1488 /**
1489 \brief
1490 wrapper for python reverse divide operation
1491 */
1492 ESCRIPT_DLL_API
1493 boost::python::object __rdiv__(const boost::python::object& right);
1494
1495 /**
1496 \brief return inverse of matricies.
1497 */
1498 ESCRIPT_DLL_API
1499 Data
1500 matrixInverse() const;
1501
1502 /**
1503 \brief
1504 Returns true if this can be interpolated to functionspace.
1505 */
1506 ESCRIPT_DLL_API
1507 bool
1508 probeInterpolation(const FunctionSpace& functionspace) const;
1509
1510 /**
1511 Data object slicing methods.
1512 */
1513
1514 /**
1515 \brief
1516 Returns a slice from this Data object.
1517
1518 \details
1519 Implements the [] get operator in python.
1520 Calls getSlice.
1521
1522 \param key - Input - python slice tuple specifying
1523 slice to return.
1524 */
1525 ESCRIPT_DLL_API
1526 Data
1527 getItem(const boost::python::object& key) const;
1528
1529 /**
1530 \brief
1531 Copies slice from value into this Data object.
1532
1533 Implements the [] set operator in python.
1534 Calls setSlice.
1535
1536 \param key - Input - python slice tuple specifying
1537 slice to copy from value.
1538 \param value - Input - Data object to copy from.
1539 */
1540 ESCRIPT_DLL_API
1541 void
1542 setItemD(const boost::python::object& key,
1543 const Data& value);
1544
1545 ESCRIPT_DLL_API
1546 void
1547 setItemO(const boost::python::object& key,
1548 const boost::python::object& value);
1549
1550 // These following public methods should be treated as private.
1551
1552 /**
1553 \brief
1554 Perform the given unary operation on every element of every data point in
1555 this Data object.
1556 */
1557 template <class UnaryFunction>
1558 ESCRIPT_DLL_API
1559 inline
1560 void
1561 unaryOp2(UnaryFunction operation);
1562
1563 /**
1564 \brief
1565 Return a Data object containing the specified slice of
1566 this Data object.
1567 \param region - Input - Region to copy.
1568 *
1569 */
1570 ESCRIPT_DLL_API
1571 Data
1572 getSlice(const DataTypes::RegionType& region) const;
1573
1574 /**
1575 \brief
1576 Copy the specified slice from the given value into this
1577 Data object.
1578 \param value - Input - Data to copy from.
1579 \param region - Input - Region to copy.
1580 *
1581 */
1582 ESCRIPT_DLL_API
1583 void
1584 setSlice(const Data& value,
1585 const DataTypes::RegionType& region);
1586
1587 /**
1588 \brief
1589 print the data values to stdout. Used for debugging
1590 */
1591 ESCRIPT_DLL_API
1592 void
1593 print(void);
1594
1595 /**
1596 \brief
1597 return the MPI rank number of the local data
1598 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1599 is returned
1600 */
1601 ESCRIPT_DLL_API
1602 int
1603 get_MPIRank(void) const;
1604
1605 /**
1606 \brief
1607 return the number of MPI processes for the local data
1608 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1609 is returned
1610 */
1611 ESCRIPT_DLL_API
1612 int
1613 get_MPISize(void) const;
1614
1615 /**
1616 \brief
1617 return the MPI communicator of the local data
1618 MPI_COMM_WORLD is assumed and returned.
1619 */
1620 ESCRIPT_DLL_API
1621 MPI_Comm
1622 get_MPIComm(void) const;
1623
1624 /**
1625 \brief
1626 return the object produced by the factory, which is a DataConstant or DataExpanded
1627 TODO Ownership of this object should be explained in doco.
1628 */
1629 ESCRIPT_DLL_API
1630 DataAbstract*
1631 borrowData(void) const;
1632
1633 ESCRIPT_DLL_API
1634 DataAbstract_ptr
1635 borrowDataPtr(void) const;
1636
1637 ESCRIPT_DLL_API
1638 DataReady_ptr
1639 borrowReadyPtr(void) const;
1640
1641
1642
1643 /**
1644 \brief
1645 Return a pointer to the beginning of the datapoint at the specified offset.
1646 TODO Eventually these should be inlined.
1647 \param i - position(offset) in the underlying datastructure
1648 */
1649
1650 ESCRIPT_DLL_API
1651 DataTypes::FloatVectorType::const_reference
1652 getDataAtOffsetRO(DataTypes::FloatVectorType::size_type i);
1653
1654
1655 ESCRIPT_DLL_API
1656 DataTypes::FloatVectorType::reference
1657 getDataAtOffsetRW(DataTypes::FloatVectorType::size_type i);
1658
1659 /**
1660 \brief Ensures that the Data is expanded and returns its underlying vector
1661 Does not check for exclusive write so do that before calling if sharing
1662 is a possibility.
1663 \warning For domain implementors only. Using this function will
1664 avoid using optimisations like lazy evaluation. It is intended
1665 to allow quick initialisation of Data by domain; not as a bypass around
1666 escript's other mechanisms.
1667 */
1668 ESCRIPT_DLL_API
1669 DataTypes::FloatVectorType&
1670 getExpandedVectorReference();
1671
1672
1673 /**
1674 * \brief For tagged Data returns the number of tags with values.
1675 * For non-tagged data will return 0 (even Data which has been expanded from tagged).
1676 */
1677 ESCRIPT_DLL_API
1678 size_t
1679 getNumberOfTaggedValues() const;
1680
1681 protected:
1682
1683 private:
1684
1685 template <class BinaryOp>
1686 double
1687 #ifdef ESYS_MPI
1688 lazyAlgWorker(double init, MPI_Op mpiop_type);
1689 #else
1690 lazyAlgWorker(double init);
1691 #endif
1692
1693 double
1694 LsupWorker() const;
1695
1696 double
1697 supWorker() const;
1698
1699 double
1700 infWorker() const;
1701
1702 boost::python::object
1703 integrateWorker() const;
1704
1705 void
1706 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1707
1708 void
1709 calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1710
1711 // For internal use in Data.cpp only!
1712 // other uses should call the main entry points and allow laziness
1713 Data
1714 minval_nonlazy() const;
1715
1716 // For internal use in Data.cpp only!
1717 Data
1718 maxval_nonlazy() const;
1719
1720
1721 /**
1722 \brief
1723 Check *this and the right operand are compatible. Throws
1724 an exception if they aren't.
1725 \param right - Input - The right hand side.
1726 */
1727 inline
1728 void
1729 operandCheck(const Data& right) const
1730 {
1731 return m_data->operandCheck(*(right.m_data.get()));
1732 }
1733
1734 /**
1735 \brief
1736 Perform the specified reduction algorithm on every element of every data point in
1737 this Data object according to the given function and return the single value result.
1738 */
1739 template <class BinaryFunction>
1740 inline
1741 double
1742 algorithm(BinaryFunction operation,
1743 double initial_value) const;
1744
1745 /**
1746 \brief
1747 Reduce each data-point in this Data object using the given operation. Return a Data
1748 object with the same number of data-points, but with each data-point containing only
1749 one value - the result of the reduction operation on the corresponding data-point in
1750 this Data object
1751 */
1752 template <class BinaryFunction>
1753 inline
1754 Data
1755 dp_algorithm(BinaryFunction operation,
1756 double initial_value) const;
1757
1758 /**
1759 \brief
1760 Perform the given binary operation on all of the data's elements.
1761 The underlying type of the right hand side (right) determines the final
1762 type of *this after the operation. For example if the right hand side
1763 is expanded *this will be expanded if necessary.
1764 RHS is a Data object.
1765 */
1766 template <class BinaryFunction>
1767 inline
1768 void
1769 binaryOp(const Data& right,
1770 BinaryFunction operation);
1771
1772 /**
1773 \brief
1774 Convert the data type of the RHS to match this.
1775 \param right - Input - data type to match.
1776 */
1777 void
1778 typeMatchLeft(Data& right) const;
1779
1780 /**
1781 \brief
1782 Convert the data type of this to match the RHS.
1783 \param right - Input - data type to match.
1784 */
1785 void
1786 typeMatchRight(const Data& right);
1787
1788 /**
1789 \brief
1790 Construct a Data object of the appropriate type.
1791 */
1792
1793 void
1794 initialise(const DataTypes::FloatVectorType& value,
1795 const DataTypes::ShapeType& shape,
1796 const FunctionSpace& what,
1797 bool expanded);
1798
1799 void
1800 initialise(const WrappedArray& value,
1801 const FunctionSpace& what,
1802 bool expanded);
1803
1804 void
1805 initialise(const double value,
1806 const DataTypes::ShapeType& shape,
1807 const FunctionSpace& what,
1808 bool expanded);
1809
1810 //
1811 // flag to protect the data object against any update
1812 bool m_protected;
1813 mutable bool m_shared;
1814 bool m_lazy;
1815
1816 //
1817 // pointer to the actual data object
1818 // boost::shared_ptr<DataAbstract> m_data;
1819 DataAbstract_ptr m_data;
1820
1821 // If possible please use getReadyPtr instead.
1822 // But see warning below.
1823 const DataReady*
1824 getReady() const;
1825
1826 DataReady*
1827 getReady();
1828
1829
1830 // Be wary of using this for local operations since it (temporarily) increases reference count.
1831 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1832 // getReady() instead
1833 DataReady_ptr
1834 getReadyPtr();
1835
1836 const_DataReady_ptr
1837 getReadyPtr() const;
1838
1839
1840 /**
1841 \brief Update the Data's shared flag
1842 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1843 For internal use only.
1844 */
// Record the new sharing state. Writing from a const method is legal
// because m_shared is declared mutable (see member declaration).
// Called by DataAbstract (a friend) when sharing changes.
void updateShareStatus(bool nowshared) const
{
m_shared=nowshared; // m_shared is mutable
}
1849
1850 // In the isShared() method below:
1851 // A problem would occur if m_data (the address pointed to) were being modified
1852 // while the call m_data->is_shared is being executed.
1853 //
1854 // Q: So why do I think this code can be thread safe/correct?
1855 // A: We need to make some assumptions.
1856 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1857 // 2. We assume that no constructions or assignments which will share previously unshared
1858 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1859 //
1860 // This means that the only transition we need to consider, is when a previously shared object is
1861 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1862 // In those cases the m_shared flag changes to false after m_data has completed changing.
1863 // For any threads executing before the flag switches they will assume the object is still shared.
// Returns the cached sharing flag; see the thread-safety discussion above
// for why the flag alone is considered an acceptable answer.
bool isShared() const
{
return m_shared;
// Dead code below retained from an earlier implementation that queried
// m_data->isShared() directly; kept for reference only.
/* if (m_shared) return true;
if (m_data->isShared())
{
updateShareStatus(true);
return true;
}
return false;*/
}
1875
/**
  \brief If this object holds lazy data, resolve it in place; otherwise do nothing.
  Must not be called from inside an OpenMP parallel region (throws if it is).
*/
void forceResolve()
{
if (isLazy())
{
#ifdef _OPENMP
// resolve() is not safe to trigger from inside a parallel region.
if (omp_in_parallel())
{	// Yes this is throwing an exception out of an omp thread which is forbidden.
throw DataException("Please do not call forceResolve() in a parallel region.");
}
#endif
resolve();
}
}
1889
1890 /**
1891 \brief if another object is sharing out member data make a copy to work with instead.
1892 This code should only be called from single threaded sections of code.
1893 */
void exclusiveWrite()
{
#ifdef _OPENMP
// Guard against misuse: deep-copying / repointing m_data is not thread safe.
if (omp_in_parallel())
{
// *((int*)0)=17;
throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
}
#endif
// Lazy data must be resolved before we can consider copying it.
forceResolve();
if (isShared())
{
// Another Data object shares our DataAbstract: work on a private deep copy.
DataAbstract* t=m_data->deepCopy();
set_m_data(DataAbstract_ptr(t));
}
#ifdef EXWRITECHK
// Debug-build bookkeeping checked later by getSampleDataRW().
m_data->exclusivewritecalled=true;
#endif
}
1913
1914 /**
1915 \brief checks if caller can have exclusive write to the object
1916 */
// Throws unless the caller already holds exclusive write access
// (i.e. the data is neither lazy nor shared). Does not acquire it;
// callers should use requireWrite() for that.
void checkExclusiveWrite()
{
if  (isLazy() || isShared())
{
throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
}
}
1924
1925 /**
1926 \brief Modify the data abstract hosted by this Data object
1927 For internal use only.
1928 Passing a pointer to null is permitted (do this in the destructor)
1929 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1930 */
1931 void set_m_data(DataAbstract_ptr p);
1932
1933 friend class DataAbstract; // To allow calls to updateShareStatus
1934 friend class TestDomain; // so its getX will work quickly
1935 #ifdef IKNOWWHATIMDOING
1936 friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1937 #endif
1938 friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1939 friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed, const boost::python::tuple& filter);
1940
1941 };
1942
1943
1944 #ifdef IKNOWWHATIMDOING
1945 ESCRIPT_DLL_API
1946 Data
1947 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1948 #endif
1949
1950
1951 ESCRIPT_DLL_API
1952 Data
1953 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1954
1955
1956
1957 /**
1958 \brief Create a new Expanded Data object filled with pseudo-random data.
1959 */
1960 ESCRIPT_DLL_API
1961 Data randomData(const boost::python::tuple& shape,
1962 const FunctionSpace& what,
1963 long seed, const boost::python::tuple& filter);
1964
1965
1966 } // end namespace escript
1967
1968
1969 // No, this is not supposed to be at the top of the file
1970 // DataAbstact needs to be declared first, then DataReady needs to be fully declared
1971 // so that I can dynamic cast between them below.
1972 #include "DataReady.h"
1973 #include "DataLazy.h"
1974
1975 namespace escript
1976 {
1977
1978 inline
1979 const DataReady*
1980 Data::getReady() const
1981 {
1982 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1983 EsysAssert((dr!=0), "Error - casting to DataReady.");
1984 return dr;
1985 }
1986
1987 inline
1988 DataReady*
1989 Data::getReady()
1990 {
1991 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1992 EsysAssert((dr!=0), "Error - casting to DataReady.");
1993 return dr;
1994 }
1995
1996 // Be wary of using this for local operations since it (temporarily) increases reference count.
1997 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1998 // getReady() instead
1999 inline
2000 DataReady_ptr
2001 Data::getReadyPtr()
2002 {
2003 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
2004 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
2005 return dr;
2006 }
2007
2008
2009 inline
2010 const_DataReady_ptr
2011 Data::getReadyPtr() const
2012 {
2013 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
2014 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
2015 return dr;
2016 }
2017
// Return a writable pointer to the start of the given sample.
// Preconditions: data is not lazy and (in EXWRITECHK builds)
// requireWrite()/exclusiveWrite() has already been called.
inline
DataAbstract::ValueType::value_type*
Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
{
if (isLazy())
{
throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
}
#ifdef EXWRITECHK
// Debug check: the flag is set by exclusiveWrite().
if (!getReady()->exclusivewritecalled)
{
throw DataException("Error, call to Data::getSampleDataRW without a preceeding call to requireWrite/exclusiveWrite.");
}
#endif
return getReady()->getSampleDataRW(sampleNo);
}
2034
2035 inline
2036 const DataAbstract::ValueType::value_type*
2037 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const
2038 {
2039 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
2040 if (l!=0)
2041 {
2042 size_t offset=0;
2043 const DataTypes::FloatVectorType* res=l->resolveSample(sampleNo,offset);
2044 return &((*res)[offset]);
2045 }
2046 return getReady()->getSampleDataRO(sampleNo);
2047 }
2048
2049 inline
2050 const DataAbstract::ValueType::value_type*
2051 Data::getDataRO() const
2052 {
2053 if (isLazy())
2054 {
2055 throw DataException("Programmer error - getDataRO must not be called on Lazy Data.");
2056 }
2057 if (getNumSamples()==0)
2058 {
2059 return 0;
2060 }
2061 else
2062 {
2063 return &(getReady()->getVectorRO()[0]);
2064 }
2065 }
2066
2067
2068 /**
2069 Binary Data object operators.
2070 */
/**
   \brief Reversed power: returns base raised to expnt, i.e. the arguments
   are swapped relative to pow(). Used for reversed binary operations.
*/
inline double rpow(double expnt,double base)
{
  return pow(base,expnt);
}
2075
2076 /**
2077 \brief
2078 Operator+
2079 Takes two Data objects.
2080 */
2081 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
2082
2083 /**
2084 \brief
2085 Operator-
2086 Takes two Data objects.
2087 */
2088 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
2089
2090 /**
2091 \brief
2092 Operator*
2093 Takes two Data objects.
2094 */
2095 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
2096
2097 /**
2098 \brief
2099 Operator/
2100 Takes two Data objects.
2101 */
2102 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2103
2104 /**
2105 \brief
2106 Operator+
2107 Takes LHS Data object and RHS python::object.
2108 python::object must be convertable to Data type.
2109 */
2110 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2111
2112 /**
2113 \brief
2114 Operator-
2115 Takes LHS Data object and RHS python::object.
2116 python::object must be convertable to Data type.
2117 */
2118 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2119
2120 /**
2121 \brief
2122 Operator*
2123 Takes LHS Data object and RHS python::object.
2124 python::object must be convertable to Data type.
2125 */
2126 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2127
2128 /**
2129 \brief
2130 Operator/
2131 Takes LHS Data object and RHS python::object.
2132 python::object must be convertable to Data type.
2133 */
2134 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2135
2136 /**
2137 \brief
2138 Operator+
2139 Takes LHS python::object and RHS Data object.
2140 python::object must be convertable to Data type.
2141 */
2142 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2143
2144 /**
2145 \brief
2146 Operator-
2147 Takes LHS python::object and RHS Data object.
2148 python::object must be convertable to Data type.
2149 */
2150 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2151
2152 /**
2153 \brief
2154 Operator*
2155 Takes LHS python::object and RHS Data object.
2156 python::object must be convertable to Data type.
2157 */
2158 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2159
2160 /**
2161 \brief
2162 Operator/
2163 Takes LHS python::object and RHS Data object.
2164 python::object must be convertable to Data type.
2165 */
2166 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2167
2168
2169
2170 /**
2171 \brief
2172 Output operator
2173 */
2174 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2175
2176 /**
2177 \brief
2178 Compute a tensor product of two Data objects
2179 \param arg_0 - Input - Data object
2180 \param arg_1 - Input - Data object
2181 \param axis_offset - Input - axis offset
2182 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
2183 */
2184 ESCRIPT_DLL_API
2185 Data
2186 C_GeneralTensorProduct(Data& arg_0,
2187 Data& arg_1,
2188 int axis_offset=0,
2189 int transpose=0);
2190
2191 /**
2192 \brief
2193 Operator/
2194 Takes RHS Data object.
2195 */
inline
Data
Data::truedivD(const Data& right)
{
// Python-style "true division" on Data is identical to operator/.
return *this / right;
}
2202
2203 /**
2204 \brief
2205 Operator/
2206 Takes RHS python::object.
2207 */
inline
Data
Data::truedivO(const boost::python::object& right)
{
// Wrap the python object as Data on this object's function space,
// then divide as Data / Data.
Data tmp(right, getFunctionSpace(), false);
return truedivD(tmp);
}
2215
2216 /**
2217 \brief
2218 Operator/
2219 Takes LHS python::object.
2220 */
inline
Data
Data::rtruedivO(const boost::python::object& left)
{
// Reversed division: wrap left as Data, then compute left / *this.
Data tmp(left, getFunctionSpace(), false);
return tmp.truedivD(*this);
}
2228
2229 /**
2230 \brief
2231 Perform the given binary operation with this and right as operands.
2232 Right is a Data object.
2233 */
/**
  \brief In-place binary operation: *this = operation(*this, right).
  Interpolates one side onto the other's function space when required,
  promotes *this to match the storage type of the RHS, then dispatches
  to the concrete (Expanded/Tagged/Constant) implementation.
  \param right - Input - the right hand side operand.
  \param operation - Input - binary functor applied element-wise.
*/
template <class BinaryFunction>
inline
void
Data::binaryOp(const Data& right,
               BinaryFunction operation)
{
   //
   // if this has a rank of zero promote it to the rank of the RHS
   if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
     throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
   }

   // Lazy operands must be resolved by the caller before reaching here.
   if (isLazy() || right.isLazy())
   {
     throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
   }
   //
   // initially make the temporary a shallow copy
   Data tempRight(right);
   FunctionSpace fsl=getFunctionSpace();
   FunctionSpace fsr=right.getFunctionSpace();
   if (fsl!=fsr) {
     // Ask the domain which direction of interpolation (if any) is allowed:
     // 0 = incompatible, 1 = interpolate RHS onto LHS, else LHS onto RHS.
     signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
     if (intres==0)
     {
         std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
         msg+=fsl.toString();
         msg+=" ";
         msg+=fsr.toString();
         throw DataException(msg.c_str());
     }
     else if (intres==1)
     {
       // an interpolation is required so create a new Data
       tempRight=Data(right,fsl);
     }
     else       // reverse interpolation preferred
     {
        // interpolate onto the RHS function space
        Data tempLeft(*this,fsr);
        set_m_data(tempLeft.m_data);
     }
   }
   operandCheck(tempRight);
   //
   // ensure this has the right type for the RHS
   typeMatchRight(tempRight);
   //
   // Need to cast to the concrete types so that the correct binaryOp
   // is called.
   if (isExpanded()) {
     //
     // Expanded data will be done in parallel, the right hand side can be
     // of any data type
     DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
     escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
   } else if (isTagged()) {
     //
     // Tagged data is operated on serially, the right hand side can be
     // either DataConstant or DataTagged
     DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
     // NOTE(review): this branches on right.isTagged() but then operates on
     // tempRight, which may be an interpolated copy of right — confirm the
     // storage type cannot change under interpolation here.
     if (right.isTagged()) {
       DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
       escript::binaryOp(*leftC,*rightC,operation);
     } else {
       DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
       escript::binaryOp(*leftC,*rightC,operation);
     }
   } else if (isConstant()) {
     DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
     DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
     EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
     escript::binaryOp(*leftC,*rightC,operation);
   }
}
2313
2314 /**
2315 \brief
2316 Perform the given Data object reduction algorithm on this and return the result.
2317 Given operation combines each element of each data point, thus argument
2318 object (*this) is a rank n Data object, and returned object is a scalar.
2319 Calls escript::algorithm.
2320 */
/**
  \brief Reduce every element of every data point to a single scalar using
  the given binary operation, starting from initial_value.
  Dispatches on the concrete storage type; throws for Empty, Lazy or
  unrecognised storage.
  \param operation - Input - binary reduction functor.
  \param initial_value - Input - seed value for the reduction.
*/
template <class BinaryFunction>
inline
double
Data::algorithm(BinaryFunction operation, double initial_value) const
{
  if (isExpanded()) {
    DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
    EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
    return escript::algorithm(*leftC,operation,initial_value);
  } else if (isTagged()) {
    DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
    EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
    return escript::algorithm(*leftC,operation,initial_value);
  } else if (isConstant()) {
    DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
    EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
    return escript::algorithm(*leftC,operation,initial_value);
  } else if (isEmpty()) {
    throw DataException("Error - Operations (algorithm) not permitted on instances of DataEmpty.");
  } else if (isLazy()) {
    throw DataException("Error - Operations not permitted on instances of DataLazy.");
  } else {
    throw DataException("Error - Data encapsulates an unknown type.");
  }
}
2346
2347 /**
2348 \brief
2349 Perform the given data point reduction algorithm on data and return the result.
2350 Given operation combines each element within each data point into a scalar,
2351 thus argument object is a rank n Data object, and returned object is a
2352 rank 0 Data object.
2353 Calls escript::dp_algorithm.
2354 */
/**
  \brief Reduce each data point to a scalar with the given operation,
  returning a scalar-valued Data object with the same number of data points
  and the same storage type (Expanded/Tagged/Constant) as *this.
  \param operation - Input - binary reduction functor applied within each data point.
  \param initial_value - Input - seed value for each per-point reduction.
*/
template <class BinaryFunction>
inline
Data
Data::dp_algorithm(BinaryFunction operation, double initial_value) const
{
  if (isEmpty()) {
    throw DataException("Error - Operations (dp_algorithm) not permitted on instances of DataEmpty.");
  }
  else if (isExpanded()) {
    // Scalar expanded result on the same function space.
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
    DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
    EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
    EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
    escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
    return result;
  }
  else if (isTagged()) {
    DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
    EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
    // Build a scalar tagged result with a zero default value, mirroring
    // the tag structure of the input.
    DataTypes::FloatVectorType defval(1);
    defval[0]=0;
    DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
    escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
    return Data(resultT);   // note: the Data object now owns the resultT pointer
  }
  else if (isConstant()) {
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
    DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
    EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
    EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
    escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
    return result;
  } else if (isLazy()) {
    throw DataException("Error - Operations not permitted on instances of DataLazy.");
  } else {
    throw DataException("Error - Data encapsulates an unknown type.");
  }
}
2395
2396 /**
2397 \brief
2398 Compute a tensor operation with two Data objects
2399 \param arg_0 - Input - Data object
2400 \param arg_1 - Input - Data object
2401 \param operation - Input - Binary op functor
2402 */
2403 template <typename BinaryFunction>
2404 inline
2405 Data
2406 C_TensorBinaryOperation(Data const &arg_0,
2407 Data const &arg_1,
2408 BinaryFunction operation)
2409 {
2410 if (arg_0.isEmpty() || arg_1.isEmpty())
2411 {
2412 throw DataException("Error - Operations (C_TensorBinaryOperation) not permitted on instances of DataEmpty.");
2413 }
2414 if (arg_0.isLazy() || arg_1.isLazy())
2415 {
2416 throw DataException("Error - Operations not permitted on lazy data.");
2417 }
2418 // Interpolate if necessary and find an appropriate function space
2419 Data arg_0_Z, arg_1_Z;
2420 FunctionSpace fsl=arg_0.getFunctionSpace();
2421 FunctionSpace fsr=arg_1.getFunctionSpace();
2422 if (fsl!=fsr) {
2423 signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2424 if (intres==0)
2425 {
2426 std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
2427 msg+=fsl.toString();
2428 msg+=" ";
2429 msg+=fsr.toString();
2430 throw DataException(msg.c_str());
2431 }
2432 else if (intres==1)
2433 {
2434 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2435 arg_0_Z =Data(arg_0);
2436 }
2437 else // reverse interpolation preferred
2438 {
2439 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2440 arg_1_Z = Data(arg_1);
2441 }
2442 } else {
2443 arg_0_Z = Data(arg_0);
2444 arg_1_Z = Data(arg_1);
2445 }
2446 // Get rank and shape of inputs
2447 int rank0 = arg_0_Z.getDataPointRank();
2448 int rank1 = arg_1_Z.getDataPointRank();
2449 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2450 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2451 int size0 = arg_0_Z.getDataPointSize();
2452 int size1 = arg_1_Z.getDataPointSize();
2453
2454 // We will require that the functions all take the same type
2455 if ((typeid(typename BinaryFunction::first_argument_type)!=typeid(typename BinaryFunction::second_argument_type))
2456 || (typeid(typename BinaryFunction::first_argument_type)!=typeid(typename BinaryFunction::result_type)))
2457 {
2458 throw DataException("Error - Functional used by C_TensorBinaryOperation must use consistent types.");
2459 }
2460
2461
2462 // Declare output Data object
2463 Data res;
2464
2465 if (shape0 == shape1) {
2466 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2467 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2468 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2469 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2470 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(0));
2471
2472 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2473 }
2474 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2475
2476 // Prepare the DataConstant input
2477 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2478
2479 // Borrow DataTagged input from Data object
2480 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2481
2482 // Prepare a DataTagged output 2
2483 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2484 res.tag();
2485 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2486
2487 // Prepare offset into DataConstant
2488 int offset_0 = tmp_0->getPointOffset(0,0);
2489 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2490
2491 // Get the pointers to the actual data
2492 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2493 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2494
2495 // Compute a result for the default
2496 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2497 // Compute a result for each tag
2498 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2499 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2500 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2501 tmp_2->addTag(i->first);
2502 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2503 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2504
2505 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2506 }
2507
2508 }
2509 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2510 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2511 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2512 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2513 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2514
2515 int sampleNo_1,dataPointNo_1;
2516 int numSamples_1 = arg_1_Z.getNumSamples();
2517 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2518 int offset_0 = tmp_0->getPointOffset(0,0);
2519 res.requireWrite();
2520 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2521 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2522 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2523 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2524 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2525 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2526 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2527 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2528 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2529 }
2530 }
2531
2532 }
2533 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2534 // Borrow DataTagged input from Data object
2535 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2536
2537 // Prepare the DataConstant input
2538 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2539
2540 // Prepare a DataTagged output 2
2541 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2542 res.tag();
2543 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2544
2545 // Prepare offset into DataConstant
2546 int offset_1 = tmp_1->getPointOffset(0,0);
2547
2548 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2549 // Get the pointers to the actual data
2550 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2551 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2552 // Compute a result for the default
2553 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2554 // Compute a result for each tag
2555 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2556 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2557 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2558 tmp_2->addTag(i->first);
2559 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2560 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2561 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2562 }
2563
2564 }
2565 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2566 // Borrow DataTagged input from Data object
2567 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2568
2569 // Borrow DataTagged input from Data object
2570 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2571
2572 // Prepare a DataTagged output 2
2573 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2574 res.tag(); // DataTagged output
2575 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2576
2577 // Get the pointers to the actual data
2578 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2579 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2580 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2581
2582 // Compute a result for the default
2583 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2584 // Merge the tags
2585 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2586 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2587 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2588 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2589 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2590 }
2591 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2592 tmp_2->addTag(i->first);
2593 }
2594 // Compute a result for each tag
2595 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2596 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2597
2598 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2599 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2600 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2601
2602 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2603 }
2604
2605 }
2606 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2607 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2608 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2609 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2610 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2611 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2612
2613 int sampleNo_0,dataPointNo_0;
2614 int numSamples_0 = arg_0_Z.getNumSamples();
2615 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2616 res.requireWrite();
2617 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2618 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2619 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2620 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2621 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2622 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2623 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2624 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2625 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2626 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2627 }
2628 }
2629
2630 }
2631 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2632 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2633 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2634 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2635 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2636
2637 int sampleNo_0,dataPointNo_0;
2638 int numSamples_0 = arg_0_Z.getNumSamples();
2639 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2640 int offset_1 = tmp_1->getPointOffset(0,0);
2641 res.requireWrite();
2642 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2643 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2644 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2645 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2646 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2647
2648 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2649 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2650 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2651
2652
2653 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2654 }
2655 }
2656
2657 }
2658 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2659 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2660 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2661 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2662 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2663 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2664
2665 int sampleNo_0,dataPointNo_0;
2666 int numSamples_0 = arg_0_Z.getNumSamples();
2667 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2668 res.requireWrite();
2669 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2670 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2671 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2672 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2673 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2674 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2675 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2676 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2677 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2678 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2679 }
2680 }
2681
2682 }
2683 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2684 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2685 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2686 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2687 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2688 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2689
2690 int sampleNo_0,dataPointNo_0;
2691 int numSamples_0 = arg_0_Z.getNumSamples();
2692 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2693 res.requireWrite();
2694 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2695 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2696 dataPointNo_0=0;
2697 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2698 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2699 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2700 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2701 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2702 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2703 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2704 tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2705 // }
2706 }
2707
2708 }
2709 else {
2710 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2711 }
2712
2713 } else if (0 == rank0) {
2714 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2715 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2716 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2717 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2718 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(0));
2719 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2720 }
2721 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2722
2723 // Prepare the DataConstant input
2724 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2725
2726 // Borrow DataTagged input from Data object
2727 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2728
2729 // Prepare a DataTagged output 2
2730 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2731 res.tag();
2732 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2733
2734 // Prepare offset into DataConstant
2735 int offset_0 = tmp_0->getPointOffset(0,0);
2736 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2737
2738 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2739 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2740
2741 // Compute a result for the default
2742 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2743 // Compute a result for each tag
2744 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2745 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2746 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2747 tmp_2->addTag(i->first);
2748 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2749 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2750 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2751 }
2752
2753 }
2754 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2755
2756 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2757 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2758 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2759 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2760
2761 int sampleNo_1;
2762 int numSamples_1 = arg_1_Z.getNumSamples();
2763 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2764 int offset_0 = tmp_0->getPointOffset(0,0);
2765 const typename decltype(operation)::first_argument_type *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2766 typename decltype(operation)::first_argument_type ptr_0 = ptr_src[0];
2767 int size = size1*numDataPointsPerSample_1;
2768 res.requireWrite();
2769 #pragma omp parallel for private(sampleNo_1) schedule(static)
2770 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2771 int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2772 int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2773 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2774 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2775 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2776 }
2777
2778 }
2779 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2780
2781 // Borrow DataTagged input from Data object
2782 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2783
2784 // Prepare the DataConstant input
2785 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2786
2787 // Prepare a DataTagged output 2
2788 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2789 res.tag();
2790 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2791
2792 // Prepare offset into DataConstant
2793 int offset_1 = tmp_1->getPointOffset(0,0);
2794 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2795
2796 // Get the pointers to the actual data
2797 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2798 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2799
2800
2801 // Compute a result for the default
2802 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2803 // Compute a result for each tag
2804 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2805 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2806 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2807 tmp_2->addTag(i->first);
2808 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2809 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2810
2811 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2812 }
2813
2814 }
2815 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2816
2817 // Borrow DataTagged input from Data object
2818 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2819
2820 // Borrow DataTagged input from Data object
2821 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2822
2823 // Prepare a DataTagged output 2
2824 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2825 res.tag(); // DataTagged output
2826 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2827
2828 // Get the pointers to the actual data
2829 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2830 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2831 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2832
2833 // Compute a result for the default
2834 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2835 // Merge the tags
2836 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2837 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2838 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2839 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2840 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2841 }
2842 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2843 tmp_2->addTag(i->first);
2844 }
2845 // Compute a result for each tag
2846 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2847 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2848 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2849 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2850 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2851
2852 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2853 }
2854
2855 }
2856 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2857
2858 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2859 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2860 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2861 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2862 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2863
2864 int sampleNo_0,dataPointNo_0;
2865 int numSamples_0 = arg_0_Z.getNumSamples();
2866 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2867 res.requireWrite();
2868 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2869 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2870 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2871 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2872 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2873 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2874 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2875 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2876 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2877 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2878 }
2879 }
2880
2881 }
2882 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2883 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2884 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2885 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2886 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2887
2888 int sampleNo_0,dataPointNo_0;
2889 int numSamples_0 = arg_0_Z.getNumSamples();
2890 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2891 int offset_1 = tmp_1->getPointOffset(0,0);
2892 res.requireWrite();
2893 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2894 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2895 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2896 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2897 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2898 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2899 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2900 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2901 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2902 }
2903 }
2904
2905
2906 }
2907 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2908
2909 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2910 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2911 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2912 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2913 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2914
2915 int sampleNo_0,dataPointNo_0;
2916 int numSamples_0 = arg_0_Z.getNumSamples();
2917 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2918 res.requireWrite();
2919 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2920 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2921 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2922 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2923 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2924 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2925 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2926 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2927 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2928 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2929 }
2930 }
2931
2932 }
2933 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2934
2935 // After finding a common function space above the two inputs have the same numSamples and num DPPS
2936 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2937 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2938 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2939 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2940
2941 int sampleNo_0,dataPointNo_0;
2942 int numSamples_0 = arg_0_Z.getNumSamples();
2943 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2944 res.requireWrite();
2945 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2946 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2947 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2948 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2949 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2950 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2951 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2952 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2953 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2954 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2955 }
2956 }
2957
2958 }
2959 else {
2960 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2961 }
2962
2963 } else if (0 == rank1) {
2964 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2965 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2966 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2967 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2968 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(0));
2969 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2970 }
2971 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2972
2973 // Prepare the DataConstant input
2974 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2975
2976 // Borrow DataTagged input from Data object
2977 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2978
2979 // Prepare a DataTagged output 2
2980 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2981 res.tag();
2982 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2983
2984 // Prepare offset into DataConstant
2985 int offset_0 = tmp_0->getPointOffset(0,0);
2986 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2987
2988 //Get the pointers to the actual data
2989 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2990 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2991
2992 // Compute a result for the default
2993 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2994 // Compute a result for each tag
2995 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2996 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2997 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2998 tmp_2->addTag(i->first);
2999 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
3000 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3001 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3002 }
3003 }
3004 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
3005
3006 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3007 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
3008 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3009 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3010
3011 int sampleNo_1,dataPointNo_1;
3012 int numSamples_1 = arg_1_Z.getNumSamples();
3013 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
3014 int offset_0 = tmp_0->getPointOffset(0,0);
3015 res.requireWrite();
3016 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
3017 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
3018 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
3019 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
3020 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
3021 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3022 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3023 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3024 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3025 }
3026 }
3027
3028 }
3029 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
3030
3031 // Borrow DataTagged input from Data object
3032 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3033
3034 // Prepare the DataConstant input
3035 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3036
3037 // Prepare a DataTagged output 2
3038 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3039 res.tag();
3040 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3041
3042 // Prepare offset into DataConstant
3043 int offset_1 = tmp_1->getPointOffset(0,0);
3044 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3045 // Get the pointers to the actual data
3046 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3047 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3048 // Compute a result for the default
3049 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3050 // Compute a result for each tag
3051 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3052 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3053 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3054 tmp_2->addTag(i->first);
3055 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3056 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3057 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3058 }
3059
3060 }
3061 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
3062
3063 // Borrow DataTagged input from Data object
3064 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3065
3066 // Borrow DataTagged input from Data object
3067 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3068
3069 // Prepare a DataTagged output 2
3070 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
3071 res.tag(); // DataTagged output
3072 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3073
3074 // Get the pointers to the actual data
3075 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3076 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDefaultValueRO(0));
3077 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3078
3079 // Compute a result for the default
3080 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3081 // Merge the tags
3082 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3083 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3084 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
3085 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3086 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
3087 }
3088 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
3089 tmp_2->addTag(i->first);
3090 }
3091 // Compute a result for each tag
3092 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
3093 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
3094 const typename decltype(operation)::first_argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3095 const typename decltype(operation)::second_argument_type *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
3096 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3097 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3098 }
3099
3100 }
3101 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
3102
3103 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3104 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3105 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3106 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3107 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3108
3109 int sampleNo_0,dataPointNo_0;
3110 int numSamples_0 = arg_0_Z.getNumSamples();
3111 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3112 res.requireWrite();
3113 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3114 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3115 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
3116 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3117 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3118 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3119 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3120 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3121 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3122 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3123 }
3124 }
3125
3126 }
3127 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
3128 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3129 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3130 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3131 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3132
3133 int sampleNo_0;
3134 int numSamples_0 = arg_0_Z.getNumSamples();
3135 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3136 int offset_1 = tmp_1->getPointOffset(0,0);
3137 const typename decltype(operation)::second_argument_type *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3138 typename decltype(operation)::second_argument_type ptr_1 = ptr_src[0];
3139 int size = size0 * numDataPointsPerSample_0;
3140 res.requireWrite();
3141 #pragma omp parallel for private(sampleNo_0) schedule(static)
3142 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3143 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3144 int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3145 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3146 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3147 tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3148 }
3149
3150
3151 }
3152 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3153
3154 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3155 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3156 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3157 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3158 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3159
3160 int sampleNo_0,dataPointNo_0;
3161 int numSamples_0 = arg_0_Z.getNumSamples();
3162 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3163 res.requireWrite();
3164 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3165 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3166 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3167 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3168 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3169 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3170 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3171 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3172 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3173 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3174 }
3175 }
3176
3177 }
3178 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3179
3180 // After finding a common function space above the two inputs have the same numSamples and num DPPS
3181 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3182 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3183 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3184 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3185
3186 int sampleNo_0,dataPointNo_0;
3187 int numSamples_0 = arg_0_Z.getNumSamples();
3188 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3189 res.requireWrite();
3190 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3191 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3192 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3193 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3194 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3195 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3196 const typename decltype(operation)::first_argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3197 const typename decltype(operation)::second_argument_type *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3198 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3199 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3200 }
3201 }
3202
3203 }
3204 else {
3205 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3206 }
3207
3208 } else {
3209 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3210 }
3211
3212 return res;
3213 }
3214
3215 template <typename UnaryFunction>
3216 Data
3217 C_TensorUnaryOperation(Data const &arg_0,
3218 UnaryFunction operation)
3219 {
3220 if (arg_0.isEmpty()) // do this before we attempt to interpolate
3221 {
3222 throw DataException("Error - Operations (C_TensorUnaryOperation) not permitted on instances of DataEmpty.");
3223 }
3224 if (arg_0.isLazy())
3225 {
3226 throw DataException("Error - Operations not permitted on lazy data.");
3227 }
3228 // Interpolate if necessary and find an appropriate function space
3229 Data arg_0_Z = Data(arg_0);
3230
3231 // Get rank and shape of inputs
3232 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3233 int size0 = arg_0_Z.getDataPointSize();
3234
3235 // Sanity check on the types of the function
3236 if (typeid(typename UnaryFunction::argument_type)!=typeid(typename UnaryFunction::result_type))
3237 {
3238 throw DataException("Error - Types for C_TensorUnaryOperation must be consistent");
3239 }
3240
3241 // Declare output Data object
3242 Data res;
3243
3244 if (arg_0_Z.isConstant()) {
3245 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3246 const typename UnaryFunction::argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3247 typename UnaryFunction::result_type *ptr_2 = &(res.getDataAtOffsetRW(0));
3248 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3249 }
3250 else if (arg_0_Z.isTagged()) {
3251
3252 // Borrow DataTagged input from Data object
3253 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3254
3255 // Prepare a DataTagged output 2
3256 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3257 res.tag();
3258 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3259
3260 // Get the pointers to the actual data
3261 const typename decltype(operation)::argument_type *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3262 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3263 // Compute a result for the default
3264 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3265 // Compute a result for each tag
3266 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3267 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3268 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3269 tmp_2->addTag(i->first);
3270 const typename decltype(operation)::argument_type *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3271 typename decltype(operation)::result_type *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3272 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3273 }
3274
3275 }
3276 else if (arg_0_Z.isExpanded()) {
3277
3278 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3279 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3280 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3281
3282 int sampleNo_0,dataPointNo_0;
3283 int numSamples_0 = arg_0_Z.getNumSamples();
3284 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3285 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3286 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3287 dataPointNo_0=0;
3288 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3289 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3290 const typename decltype(operation)::argument_type *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3291 typename decltype(operation)::result_type *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3292 tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3293 }
3294 }
3295 else {
3296 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3297 }
3298
3299 return res;
3300 }
3301
3302 }
3303 #endif

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26