
Contents of /branches/arrexp_2137_win_merge/escript/src/Data.h



Revision 2226
Thu Jan 22 05:11:02 2009 UTC by jfenwick
File MIME type: text/plain
File size: 91789 byte(s)
It continues
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 //#include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43 #include <boost/python/numeric.hpp>
44
45 namespace escript {
46
47 //
48 // Forward declaration for various implementations of Data.
49 class DataConstant;
50 class DataTagged;
51 class DataExpanded;
52 class DataLazy;
53
54 /**
55 \brief
56 Data represents a collection of datapoints.
57
58 Description:
59 Internally, the datapoints are actually stored by a DataAbstract object.
60 The specific instance of DataAbstract used may vary over the lifetime
61 of the Data object.
62 Some methods on this class return references (eg getShape()).
63 These references should not be used after an operation which changes the underlying DataAbstract object.
64 Doing so will lead to invalid memory access.
65 This should not affect any methods exposed via boost::python.
66 */
67 class Data {
68
69 public:
70
71 // These typedefs allow function names to be cast to pointers
72 // to functions of the appropriate type when calling unaryOp etc.
73 typedef double (*UnaryDFunPtr)(double);
74 typedef double (*BinaryDFunPtr)(double,double);
75
76
77 /**
78 Constructors.
79 */
80
81 /**
82 \brief
83 Default constructor.
84 Creates a DataEmpty object.
85 */
86 ESCRIPT_DLL_API
87 Data();
88
89 /**
90 \brief
91 Copy constructor.
92 WARNING: Only performs a shallow copy.
93 */
94 ESCRIPT_DLL_API
95 Data(const Data& inData);
96
97 /**
98 \brief
99 Constructor from another Data object. If "what" differs from the
100 function space of inData, an attempt is made to interpolate inData onto "what";
101 otherwise a shallow copy of inData is returned.
102 */
103 ESCRIPT_DLL_API
104 Data(const Data& inData,
105 const FunctionSpace& what);
106
107 /**
108 \brief Copy Data from an existing vector
109 */
110
111 ESCRIPT_DLL_API
112 Data(const DataTypes::ValueType& value,
113 const DataTypes::ShapeType& shape,
114 const FunctionSpace& what=FunctionSpace(),
115 bool expanded=false);
116
117 /**
118 \brief
119 Constructor which creates a Data of the given data point shape, filled with a single value.
120
121 \param value - Input - Single value applied to all Data.
122 \param dataPointShape - Input - The shape of each data point.
123 \param what - Input - A description of what this data represents.
124 \param expanded - Input - Flag, if true fill the entire container with
125 the given value. Otherwise a more efficient storage
126 mechanism will be used.
127 */
128 ESCRIPT_DLL_API
129 Data(double value,
130 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
131 const FunctionSpace& what=FunctionSpace(),
132 bool expanded=false);
133
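  /**
     \brief
     Usage sketch: constructing a constant scalar Data object with the
     constructor above. A minimal illustration only; in real code the
     FunctionSpace "fs" would normally be obtained from a Domain rather
     than default constructed.

     \code
     escript::FunctionSpace fs;                                         // assumed: normally taken from a Domain
     escript::Data d(1.5, escript::DataTypes::ShapeType(), fs, false);  // scalar data points, constant storage
     d.expand();                                                        // switch to expanded storage if needed
     \endcode
  */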
134 /**
135 \brief
136 Constructor which performs a deep copy of a region from another Data object.
137
138 \param inData - Input - Input Data object.
139 \param region - Input - Region to copy.
140 */
141 ESCRIPT_DLL_API
142 Data(const Data& inData,
143 const DataTypes::RegionType& region);
144
145 /**
146 \brief
147 Constructor which copies data from any object that can be converted into
148 a python numarray.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be converted
165 into a numarray. All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
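  /**
     \brief
     Usage sketch: delayed (lazy) evaluation with delay()/resolve(). A
     sketch only; "d" stands for any existing Data object.

     \code
     escript::Data lazy = d.delay();   // wrap d in a delayed-evaluation (lazy) object
     // ... further operations can be composed on "lazy" without immediate evaluation ...
     lazy.resolve();                   // force evaluation back into ready data
     \endcode
  */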
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true if the data object is protected against update
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258 /**
259 \brief
260 Return the values of a data point on this process
261 */
262 ESCRIPT_DLL_API
263 const boost::python::numeric::array
264 getValueOfDataPoint(int dataPointNo);
265
266 ESCRIPT_DLL_API
267 const boost::python::object
268 getValueOfDataPointAsTuple(int dataPointNo);
269
270 /**
271 \brief
272 sets the values of a data-point from a python object on this process
273 */
274 ESCRIPT_DLL_API
275 void
276 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
277
278 /**
279 \brief
280 sets the values of a data-point from a numarray object on this process
281 */
282 ESCRIPT_DLL_API
283 void
284 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
285
286 /**
287 \brief
288 sets the values of a data-point on this process
289 */
290 ESCRIPT_DLL_API
291 void
292 setValueOfDataPoint(int dataPointNo, const double);
293
294 /**
295 \brief
296 Return the value of the specified data-point across all processors
297 */
298 ESCRIPT_DLL_API
299 const boost::python::numeric::array
300 getValueOfGlobalDataPoint(int procNo, int dataPointNo);
301
302 ESCRIPT_DLL_API
303 const boost::python::object
304 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
305
306 /**
307 \brief
308 Return the tag number associated with the given data-point.
309
310 */
311 ESCRIPT_DLL_API
312 int
313 getTagNumber(int dpno);
314
315 /**
316 \brief
317 Return the C wrapper for the Data object.
318 */
319 ESCRIPT_DLL_API
320 escriptDataC
321 getDataC();
322
323
324
325 /**
326 \brief
327 Return the C wrapper for the Data object - const version.
328 */
329 ESCRIPT_DLL_API
330 escriptDataC
331 getDataC() const;
332
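  /**
     \brief
     Usage sketch: handing a Data object to C code via getDataC(). A
     sketch only; "some_c_routine" is a hypothetical C consumer and is not
     part of this header.

     \code
     escriptDataC dc = d.getDataC();   // lightweight C view of d
     // some_c_routine(&dc);           // hypothetical: C function taking an escriptDataC*
     \endcode
  */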
333 /**
334 \brief How much space is required to evaluate a sample of the Data.
335 */
336 ESCRIPT_DLL_API
337 size_t
338 getSampleBufferSize() const;
339
340
341
342 /**
343 \brief
344 Write the data as a string. For large amounts of data, a summary is printed.
345 */
346 ESCRIPT_DLL_API
347 std::string
348 toString() const;
349
350 /**
351 \brief
352 Whatever the current Data type, make this into a DataExpanded.
353 */
354 ESCRIPT_DLL_API
355 void
356 expand();
357
358 /**
359 \brief
360 If possible convert this Data to DataTagged. This will only allow
361 Constant data to be converted to tagged. An attempt to convert
362 Expanded data to tagged will throw an exception.
363 */
364 ESCRIPT_DLL_API
365 void
366 tag();
367
368 /**
369 \brief If this data is lazy, then convert it to ready data.
370 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
371 */
372 ESCRIPT_DLL_API
373 void
374 resolve();
375
376
377 /**
378 \brief Ensures data is ready for write access.
379 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
380 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
381 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
382 Doing so might introduce additional sharing.
383 */
384 ESCRIPT_DLL_API
385 void
386 requireWrite();
387
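  /**
     \brief
     Usage sketch: obtaining read-write access to sample data. A sketch
     only; "d" stands for an existing Data object.

     \code
     d.requireWrite();   // single threaded: resolve lazy data / break sharing first
     for (int s=0; s<d.getNumSamples(); ++s)
     {
         escript::DataAbstract::ValueType::value_type* p = d.getSampleDataRW(s);
         // ... update the getNumDataPointsPerSample()*getDataPointSize() values at p ...
     }
     \endcode
  */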
388 /**
389 \brief
390 Return true if this Data is expanded.
391 \note To determine if a sample will contain separate values for each datapoint, use actsExpanded instead.
392 */
393 ESCRIPT_DLL_API
394 bool
395 isExpanded() const;
396
397 /**
398 \brief
399 Return true if this Data is expanded or resolves to expanded.
400 That is, if it has a separate value for each datapoint in the sample.
401 */
402 ESCRIPT_DLL_API
403 bool
404 actsExpanded() const;
405
406
407 /**
408 \brief
409 Return true if this Data is tagged.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isTagged() const;
414
415 /**
416 \brief
417 Return true if this Data is constant.
418 */
419 ESCRIPT_DLL_API
420 bool
421 isConstant() const;
422
423 /**
424 \brief Return true if this Data is lazy.
425 */
426 ESCRIPT_DLL_API
427 bool
428 isLazy() const;
429
430 /**
431 \brief Return true if this data is ready.
432 */
433 ESCRIPT_DLL_API
434 bool
435 isReady() const;
436
437 /**
438 \brief
439 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
440 contains datapoints.
441 */
442 ESCRIPT_DLL_API
443 bool
444 isEmpty() const;
445
446 /**
447 \brief
448 Return the function space.
449 */
450 ESCRIPT_DLL_API
451 inline
452 const FunctionSpace&
453 getFunctionSpace() const
454 {
455 return m_data->getFunctionSpace();
456 }
457
458 /**
459 \brief
460 Return a copy of the function space.
461 */
462 ESCRIPT_DLL_API
463 const FunctionSpace
464 getCopyOfFunctionSpace() const;
465
466 /**
467 \brief
468 Return the domain.
469 */
470 ESCRIPT_DLL_API
471 inline
472 // const AbstractDomain&
473 const_Domain_ptr
474 getDomain() const
475 {
476 return getFunctionSpace().getDomain();
477 }
478
479
480 /**
481 \brief
482 Return the domain.
483 TODO: For internal use only. This should be removed.
484 */
485 ESCRIPT_DLL_API
486 inline
487 // const AbstractDomain&
488 Domain_ptr
489 getDomainPython() const
490 {
491 return getFunctionSpace().getDomainPython();
492 }
493
494 /**
495 \brief
496 Return a copy of the domain.
497 */
498 ESCRIPT_DLL_API
499 const AbstractDomain
500 getCopyOfDomain() const;
501
502 /**
503 \brief
504 Return the rank of the point data.
505 */
506 ESCRIPT_DLL_API
507 inline
508 unsigned int
509 getDataPointRank() const
510 {
511 return m_data->getRank();
512 }
513
514 /**
515 \brief
516 Return the number of data points
517 */
518 ESCRIPT_DLL_API
519 inline
520 int
521 getNumDataPoints() const
522 {
523 return getNumSamples() * getNumDataPointsPerSample();
524 }
525 /**
526 \brief
527 Return the number of samples.
528 */
529 ESCRIPT_DLL_API
530 inline
531 int
532 getNumSamples() const
533 {
534 return m_data->getNumSamples();
535 }
536
537 /**
538 \brief
539 Return the number of data points per sample.
540 */
541 ESCRIPT_DLL_API
542 inline
543 int
544 getNumDataPointsPerSample() const
545 {
546 return m_data->getNumDPPSample();
547 }
548
549
550 /**
551 \brief
552 Return the number of values in the shape for this object.
553 */
554 ESCRIPT_DLL_API
555 int
556 getNoValues() const
557 {
558 return m_data->getNoValues();
559 }
560
561
562 /**
563 \brief
564 dumps the object into a netCDF file
565 */
566 ESCRIPT_DLL_API
567 void
568 dump(const std::string fileName) const;
569
570 /**
571 \brief
572 Return the sample data for the given sample no. This is not the
573 preferred interface but is provided for use by C code.
574 The buffer parameter is only required for LazyData.
575 \param sampleNo - Input - the given sample no.
576 \param buffer - Vector to compute (and store) sample data in.
577 \return pointer to the sample data.
578 */
579 ESCRIPT_DLL_API
580 inline
581 const DataAbstract::ValueType::value_type*
582 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer=0);
583
584 /**
585 \brief
586 Return the sample data for the given sample no. This is not the
587 preferred interface but is provided for use by C code.
588 \param sampleNo - Input - the given sample no.
589 \return pointer to the sample data.
590 */
591 ESCRIPT_DLL_API
592 inline
593 DataAbstract::ValueType::value_type*
594 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
595
596
597 /**
598 \brief
599 Return the sample data for the given tag. If an attempt is made to
600 access data that isn't tagged an exception will be thrown.
601 \param tag - Input - the tag key.
602 */
603 ESCRIPT_DLL_API
604 inline
605 DataAbstract::ValueType::value_type*
606 getSampleDataByTag(int tag)
607 {
608 return m_data->getSampleDataByTag(tag);
609 }
610
611 /**
612 \brief
613 Return a view into the data for the data point specified.
614 NOTE: Construction of the DataArrayView is a relatively expensive
615 operation.
616 \param sampleNo - Input -
617 \param dataPointNo - Input -
618 */
619 ESCRIPT_DLL_API
620 DataTypes::ValueType::const_reference
621 getDataPointRO(int sampleNo, int dataPointNo);
622
623
624 ESCRIPT_DLL_API
625 DataTypes::ValueType::reference
626 getDataPointRW(int sampleNo, int dataPointNo);
627
628
629
630 /**
631 \brief
632 Return the offset for the given sample and point within the sample
633 */
634 ESCRIPT_DLL_API
635 inline
636 DataTypes::ValueType::size_type
637 getDataOffset(int sampleNo,
638 int dataPointNo)
639 {
640 return m_data->getPointOffset(sampleNo,dataPointNo);
641 }
642
643 /**
644 \brief
645 Return a reference to the data point shape.
646 */
647 ESCRIPT_DLL_API
648 inline
649 const DataTypes::ShapeType&
650 getDataPointShape() const
651 {
652 return m_data->getShape();
653 }
654
655 /**
656 \brief
657 Return the data point shape as a tuple of integers.
658 */
659 ESCRIPT_DLL_API
660 const boost::python::tuple
661 getShapeTuple() const;
662
663 /**
664 \brief
665 Return the size of the data point. It is the product of the
666 data point shape dimensions.
667 */
668 ESCRIPT_DLL_API
669 int
670 getDataPointSize() const;
671
672 /**
673 \brief
674 Return the number of doubles stored for this Data.
675 */
676 ESCRIPT_DLL_API
677 DataTypes::ValueType::size_type
678 getLength() const;
679
680
681
682 /**
683 \brief
684 Assign the given value to the tag associated with name. Implicitly converts this
685 object to type DataTagged. Throws an exception if this object
686 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
687 \param name - Input - the name of the tag.
688 \param value - Input - Value to associate with the given tag.
689 *
690 */
691 ESCRIPT_DLL_API
692 void
693 setTaggedValueByName(std::string name,
694 const boost::python::object& value);
695
696 /**
697 \brief
698 Assign the given value to the tag. Implicitly converts this
699 object to type DataTagged if it is constant.
700
701 \param tagKey - Input - Integer key.
702 \param value - Input - Value to associate with given key.
703 *
704 */
705 ESCRIPT_DLL_API
706 void
707 setTaggedValue(int tagKey,
708 const boost::python::object& value);
709
710 /**
711 \brief
712 Assign the given value to the tag. Implicitly converts this
713 object to type DataTagged if it is constant.
714
715 \param tagKey - Input - Integer key.
716 \param pointshape - Input - The shape of the value parameter
717 \param value - Input - Value to associate with given key.
718 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
719 */
720 ESCRIPT_DLL_API
721 void
722 setTaggedValueFromCPP(int tagKey,
723 const DataTypes::ShapeType& pointshape,
724 const DataTypes::ValueType& value,
725 int dataOffset=0);
726
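  /**
     \brief
     Usage sketch: assigning a per-tag value from C++ with
     setTaggedValueFromCPP(). A sketch only; the tag key 1 and the scalar
     value are arbitrary, and "d" is assumed to be constant or already tagged.

     \code
     escript::DataTypes::ValueType v(1);   // storage for one scalar value
     v[0] = 42.0;
     d.tag();                              // ensure the object is (at least) DataTagged
     d.setTaggedValueFromCPP(1, escript::DataTypes::ShapeType(), v);   // value for tag key 1
     \endcode
  */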
727
728
729 /**
730 \brief
731 Copy other Data object into this Data object where mask is positive.
732 */
733 ESCRIPT_DLL_API
734 void
735 copyWithMask(const Data& other,
736 const Data& mask);
737
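  /**
     \brief
     Usage sketch: conditional assignment via copyWithMask(). A sketch
     only; "a" and "b" stand for compatible Data objects.

     \code
     escript::Data mask = b.wherePositive();   // 1 where b>0, 0 elsewhere
     a.copyWithMask(b, mask);                  // a takes b's values wherever mask is positive
     \endcode
  */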
738 /**
739 Data object operation methods and operators.
740 */
741
742 /**
743 \brief
744 set all values to zero
745 *
746 */
747 ESCRIPT_DLL_API
748 void
749 setToZero();
750
751 /**
752 \brief
753 Interpolates this onto the given functionspace and returns
754 the result as a Data object.
755 *
756 */
757 ESCRIPT_DLL_API
758 Data
759 interpolate(const FunctionSpace& functionspace) const;
760 /**
761 \brief
762 Calculates the gradient of the data at the data points of functionspace.
763 If functionspace is not present the function space of Function(getDomain()) is used.
764 *
765 */
766 ESCRIPT_DLL_API
767 Data
768 gradOn(const FunctionSpace& functionspace) const;
769
770 ESCRIPT_DLL_API
771 Data
772 grad() const;
773
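  /**
     \brief
     Usage sketch: interpolation and gradients. A sketch only; "u" stands
     for an existing Data object and "fs" for a target FunctionSpace.

     \code
     escript::Data v = u.interpolate(fs);   // u expressed on the function space fs
     escript::Data g = u.grad();            // gradient evaluated on Function(getDomain())
     \endcode
  */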
774 /**
775 \brief
776 Calculate the integral over the function space domain.
777 *
778 */
779 ESCRIPT_DLL_API
780 boost::python::object
781 integrate_const() const;
782
783 ESCRIPT_DLL_API
784 boost::python::object
785 integrate();
786
787 ESCRIPT_DLL_API
788 boost::python::object
789 integrateToTuple_const() const;
790
791 ESCRIPT_DLL_API
792 boost::python::object
793 integrateToTuple();
794
795
796
797 /**
798 \brief
799 Return the reciprocal (1./value) of each data point of this Data object.
800 *
801 */
802 ESCRIPT_DLL_API
803 Data
804 oneOver() const;
805 /**
806 \brief
807 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
808 *
809 */
810 ESCRIPT_DLL_API
811 Data
812 wherePositive() const;
813
814 /**
815 \brief
816 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
817 *
818 */
819 ESCRIPT_DLL_API
820 Data
821 whereNegative() const;
822
823 /**
824 \brief
825 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
826 *
827 */
828 ESCRIPT_DLL_API
829 Data
830 whereNonNegative() const;
831
832 /**
833 \brief
834 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
835 *
836 */
837 ESCRIPT_DLL_API
838 Data
839 whereNonPositive() const;
840
841 /**
842 \brief
843 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
844 *
845 */
846 ESCRIPT_DLL_API
847 Data
848 whereZero(double tol=0.0) const;
849
850 /**
851 \brief
852 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
853 *
854 */
855 ESCRIPT_DLL_API
856 Data
857 whereNonZero(double tol=0.0) const;
858
859 /**
860 \brief
861 Return the maximum absolute value of this Data object.
862
863 The method is not const because lazy data needs to be expanded before Lsup can be computed.
864 The _const form can be used when the Data object is const, however this will only work for
865 Data which is not Lazy.
866
867 For Data which contain no samples (or tagged Data for which no tags in use have a value)
868 zero is returned.
869 */
870 ESCRIPT_DLL_API
871 double
872 Lsup();
873
874 ESCRIPT_DLL_API
875 double
876 Lsup_const() const;
877
878
879 /**
880 \brief
881 Return the maximum value of this Data object.
882
883 The method is not const because lazy data needs to be expanded before sup can be computed.
884 The _const form can be used when the Data object is const, however this will only work for
885 Data which is not Lazy.
886
887 For Data which contain no samples (or tagged Data for which no tags in use have a value)
888 a large negative value is returned.
889 */
890 ESCRIPT_DLL_API
891 double
892 sup();
893
894 ESCRIPT_DLL_API
895 double
896 sup_const() const;
897
898
899 /**
900 \brief
901 Return the minimum value of this Data object.
902
903 The method is not const because lazy data needs to be expanded before inf can be computed.
904 The _const form can be used when the Data object is const, however this will only work for
905 Data which is not Lazy.
906
907 For Data which contain no samples (or tagged Data for which no tags in use have a value)
908 a large positive value is returned.
909 */
910 ESCRIPT_DLL_API
911 double
912 inf();
913
914 ESCRIPT_DLL_API
915 double
916 inf_const() const;
917
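  /**
     \brief
     Usage sketch: global reductions. A sketch only; "d" stands for an
     existing Data object.

     \code
     double maxabs = d.Lsup();   // maximum absolute value (resolves lazy data first)
     double hi     = d.sup();    // maximum value
     double lo     = d.inf();    // minimum value
     \endcode
  */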
918
919
920 /**
921 \brief
922 Return the absolute value of each data point of this Data object.
923 *
924 */
925 ESCRIPT_DLL_API
926 Data
927 abs() const;
928
929 /**
930 \brief
931 Return the maximum value of each data point of this Data object.
932 *
933 */
934 ESCRIPT_DLL_API
935 Data
936 maxval() const;
937
938 /**
939 \brief
940 Return the minimum value of each data point of this Data object.
941 *
942 */
943 ESCRIPT_DLL_API
944 Data
945 minval() const;
946
947 /**
948 \brief
949 Return the (sample number, data-point number) of the data point with
950 the minimum value in this Data object.
951 */
952 ESCRIPT_DLL_API
953 const boost::python::tuple
954 minGlobalDataPoint() const;
955
956 ESCRIPT_DLL_API
957 void
958 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
959 /**
960 \brief
961 Return the sign of each data point of this Data object.
962 -1 for negative values, zero for zero values, 1 for positive values.
963 *
964 */
965 ESCRIPT_DLL_API
966 Data
967 sign() const;
968
969 /**
970 \brief
971 Return the symmetric part of a matrix, i.e. half of the matrix plus its transpose.
972 *
973 */
974 ESCRIPT_DLL_API
975 Data
976 symmetric() const;
977
978 /**
979 \brief
980 Return the nonsymmetric part of a matrix, i.e. half of the matrix minus its transpose.
981 *
982 */
983 ESCRIPT_DLL_API
984 Data
985 nonsymmetric() const;
986
987 /**
988 \brief
989 Return the trace of a matrix
990 *
991 */
992 ESCRIPT_DLL_API
993 Data
994 trace(int axis_offset) const;
995
996 /**
997 \brief
998 Transpose each data point of this Data object around the given axis.
999 *
1000 */
1001 ESCRIPT_DLL_API
1002 Data
1003 transpose(int axis_offset) const;
1004
1005 /**
1006 \brief
1007 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1008 Currently this function is restricted to rank 2, square shape, and dimension 3.
1009 *
1010 */
1011 ESCRIPT_DLL_API
1012 Data
1013 eigenvalues() const;
1014
1015 /**
1016 \brief
1017 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1018 The eigenvalues are ordered in increasing size, where eigenvalues with a relative difference less than
1019 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1020 first non-zero entry is positive.
1021 Currently this function is restricted to rank 2, square shape, and dimension 3.
1022 *
1023 */
1024 ESCRIPT_DLL_API
1025 const boost::python::tuple
1026 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1027
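  /**
     \brief
     Usage sketch: eigen decomposition of rank-2 data points. A sketch
     only; "m" stands for a Data object with square rank-2 data points.

     \code
     escript::Data ev = m.eigenvalues();                                 // eigenvalues in increasing order
     boost::python::tuple vv = m.eigenvalues_and_eigenvectors(1.e-12);   // (eigenvalues, eigenvectors)
     \endcode
  */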
1028 /**
1029 \brief
1030 swaps the components axis0 and axis1
1031 *
1032 */
1033 ESCRIPT_DLL_API
1034 Data
1035 swapaxes(const int axis0, const int axis1) const;
1036
1037 /**
1038 \brief
1039 Return the error function erf of each data point of this Data object.
1040 *
1041 */
1042 ESCRIPT_DLL_API
1043 Data
1044 erf() const;
1045
1046 /**
1047 \brief
1048 Return the sin of each data point of this Data object.
1049 *
1050 */
1051 ESCRIPT_DLL_API
1052 Data
1053 sin() const;
1054
1055 /**
1056 \brief
1057 Return the cos of each data point of this Data object.
1058 *
1059 */
1060 ESCRIPT_DLL_API
1061 Data
1062 cos() const;
1063
1064 /**
1065 \brief
1066 Return the tan of each data point of this Data object.
1067 *
1068 */
1069 ESCRIPT_DLL_API
1070 Data
1071 tan() const;
1072
1073 /**
1074 \brief
1075 Return the asin of each data point of this Data object.
1076 *
1077 */
1078 ESCRIPT_DLL_API
1079 Data
1080 asin() const;
1081
1082 /**
1083 \brief
1084 Return the acos of each data point of this Data object.
1085 *
1086 */
1087 ESCRIPT_DLL_API
1088 Data
1089 acos() const;
1090
1091 /**
1092 \brief
1093 Return the atan of each data point of this Data object.
1094 *
1095 */
1096 ESCRIPT_DLL_API
1097 Data
1098 atan() const;
1099
1100 /**
1101 \brief
1102 Return the sinh of each data point of this Data object.
1103 *
1104 */
1105 ESCRIPT_DLL_API
1106 Data
1107 sinh() const;
1108
1109 /**
1110 \brief
1111 Return the cosh of each data point of this Data object.
1112 *
1113 */
1114 ESCRIPT_DLL_API
1115 Data
1116 cosh() const;
1117
1118 /**
1119 \brief
1120 Return the tanh of each data point of this Data object.
1121 *
1122 */
1123 ESCRIPT_DLL_API
1124 Data
1125 tanh() const;
1126
1127 /**
1128 \brief
1129 Return the asinh of each data point of this Data object.
1130 *
1131 */
1132 ESCRIPT_DLL_API
1133 Data
1134 asinh() const;
1135
1136 /**
1137 \brief
1138 Return the acosh of each data point of this Data object.
1139 *
1140 */
1141 ESCRIPT_DLL_API
1142 Data
1143 acosh() const;
1144
1145 /**
1146 \brief
1147 Return the atanh of each data point of this Data object.
1148 *
1149 */
1150 ESCRIPT_DLL_API
1151 Data
1152 atanh() const;
1153
1154 /**
1155 \brief
1156 Return the log to base 10 of each data point of this Data object.
1157 *
1158 */
1159 ESCRIPT_DLL_API
1160 Data
1161 log10() const;
1162
1163 /**
1164 \brief
1165 Return the natural log of each data point of this Data object.
1166 *
1167 */
1168 ESCRIPT_DLL_API
1169 Data
1170 log() const;
1171
1172 /**
1173 \brief
1174 Return the exponential function of each data point of this Data object.
1175 *
1176 */
1177 ESCRIPT_DLL_API
1178 Data
1179 exp() const;
1180
1181 /**
1182 \brief
1183 Return the square root of each data point of this Data object.
1184 *
1185 */
1186 ESCRIPT_DLL_API
1187 Data
1188 sqrt() const;
1189
1190 /**
1191 \brief
1192 Return the negation of each data point of this Data object.
1193 *
1194 */
1195 ESCRIPT_DLL_API
1196 Data
1197 neg() const;
1198
1199 /**
1200 \brief
1201 Return the identity of each data point of this Data object.
1202 Simply returns this object unmodified.
1203 *
1204 */
1205 ESCRIPT_DLL_API
1206 Data
1207 pos() const;
1208
1209 /**
1210 \brief
1211 Return the given power of each data point of this Data object.
1212
1213 \param right Input - the power to raise the object to.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 powD(const Data& right) const;
1219
1220 /**
1221 \brief
1222 Return each data point of this Data object raised to the power given by the boost python object.
1223
1224 \param right Input - the power to raise the object to.
1225 *
1226 */
1227 ESCRIPT_DLL_API
1228 Data
1229 powO(const boost::python::object& right) const;
1230
1231 /**
1232 \brief
1233 Return the given bases raised to the power of each data point of this Data object.
1234
1235 \param left Input - the bases
1236 *
1237 */
1238
1239 ESCRIPT_DLL_API
1240 Data
1241 rpowO(const boost::python::object& left) const;
1242
1243 /**
1244 \brief
1245 writes the object to a file in the DX file format
1246 */
1247 ESCRIPT_DLL_API
1248 void
1249 saveDX(std::string fileName) const;
1250
1251 /**
1252 \brief
1253 writes the object to a file in the VTK file format
1254 */
1255 ESCRIPT_DLL_API
1256 void
1257 saveVTK(std::string fileName) const;
1258
1259 /**
1260 \brief
1261 Overloaded operator +=
1262 \param right - Input - The right hand side.
1263 *
1264 */
1265 ESCRIPT_DLL_API
1266 Data& operator+=(const Data& right);
1267 ESCRIPT_DLL_API
1268 Data& operator+=(const boost::python::object& right);
1269
1270 ESCRIPT_DLL_API
1271 Data& operator=(const Data& other);
1272
1273 /**
1274 \brief
1275 Overloaded operator -=
1276 \param right - Input - The right hand side.
1277 *
1278 */
1279 ESCRIPT_DLL_API
1280 Data& operator-=(const Data& right);
1281 ESCRIPT_DLL_API
1282 Data& operator-=(const boost::python::object& right);
1283
1284 /**
1285 \brief
1286 Overloaded operator *=
1287 \param right - Input - The right hand side.
1288 *
1289 */
1290 ESCRIPT_DLL_API
1291 Data& operator*=(const Data& right);
1292 ESCRIPT_DLL_API
1293 Data& operator*=(const boost::python::object& right);
1294
1295 /**
1296 \brief
1297 Overloaded operator /=
1298 \param right - Input - The right hand side.
1299 *
1300 */
1301 ESCRIPT_DLL_API
1302 Data& operator/=(const Data& right);
1303 ESCRIPT_DLL_API
1304 Data& operator/=(const boost::python::object& right);
1305
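  /**
     \brief
     Usage sketch: arithmetic on Data objects using the operators above and
     the free binary operators declared later in this header. A sketch
     only; "a" and "b" stand for Data objects on compatible function spaces.

     \code
     escript::Data c = a + b;   // element-wise sum (may interpolate to a common function space)
     c *= b;                    // in-place product
     c -= a;                    // in-place difference
     \endcode
  */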
1306 /**
1307 \brief
1308 Returns true if this can be interpolated to functionspace.
1309 */
1310 ESCRIPT_DLL_API
1311 bool
1312 probeInterpolation(const FunctionSpace& functionspace) const;
1313
1314 /**
1315 Data object slicing methods.
1316 */
1317
1318 /**
1319 \brief
1320 Returns a slice from this Data object.
1321
1322 Description:
1323 Implements the [] get operator in python.
1324 Calls getSlice.
1325
1326 \param key - Input - python slice tuple specifying
1327 slice to return.
1328 */
1329 ESCRIPT_DLL_API
1330 Data
1331 getItem(const boost::python::object& key) const;
1332
1333 /**
1334 \brief
1335 Copies slice from value into this Data object.
1336
1337 Implements the [] set operator in python.
1338 Calls setSlice.
1339
1340 \param key - Input - python slice tuple specifying
1341 slice to copy from value.
1342 \param value - Input - Data object to copy from.
1343 */
1344 ESCRIPT_DLL_API
1345 void
1346 setItemD(const boost::python::object& key,
1347 const Data& value);
1348
1349 ESCRIPT_DLL_API
1350 void
1351 setItemO(const boost::python::object& key,
1352 const boost::python::object& value);
1353
1354 // The following public methods should be treated as private.
1355
1356 /**
1357 \brief
1358 Perform the given unary operation on every element of every data point in
1359 this Data object.
1360 */
1361 template <class UnaryFunction>
1362 ESCRIPT_DLL_API
1363 inline
1364 void
1365 unaryOp2(UnaryFunction operation);
1366
1367 /**
1368 \brief
1369 Return a Data object containing the specified slice of
1370 this Data object.
1371 \param region - Input - Region to copy.
1372 *
1373 */
1374 ESCRIPT_DLL_API
1375 Data
1376 getSlice(const DataTypes::RegionType& region) const;
1377
1378 /**
1379 \brief
1380 Copy the specified slice from the given value into this
1381 Data object.
1382 \param value - Input - Data to copy from.
1383 \param region - Input - Region to copy.
1384 *
1385 */
1386 ESCRIPT_DLL_API
1387 void
1388 setSlice(const Data& value,
1389 const DataTypes::RegionType& region);
1390
1391 /**
1392 \brief
1393 print the data values to stdout. Used for debugging
1394 */
1395 ESCRIPT_DLL_API
1396 void
1397 print(void);
1398
1399 /**
1400 \brief
1401 return the MPI rank number of the local data
1402 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1403 is returned
1404 */
1405 ESCRIPT_DLL_API
1406 int
1407 get_MPIRank(void) const;
1408
1409 /**
1410 \brief
1411 return the number of MPI processes for the local data
1412 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1413 is returned
1414 */
1415 ESCRIPT_DLL_API
1416 int
1417 get_MPISize(void) const;
1418
1419 /**
1420 \brief
1421 return the MPI communicator for the local data
1422 MPI_COMM_WORLD is assumed and returned.
1423 */
1424 ESCRIPT_DLL_API
1425 MPI_Comm
1426 get_MPIComm(void) const;
1427
1428 /**
1429 \brief
1430 return the object produced by the factory, which is a DataConstant or DataExpanded
1431 TODO Ownership of this object should be explained in doco.
1432 */
1433 ESCRIPT_DLL_API
1434 DataAbstract*
1435 borrowData(void) const;
1436
1437 ESCRIPT_DLL_API
1438 DataAbstract_ptr
1439 borrowDataPtr(void) const;
1440
1441 ESCRIPT_DLL_API
1442 DataReady_ptr
1443 borrowReadyPtr(void) const;
1444
1445
1446
1447 /**
1448 \brief
1449 Return a reference to the beginning of the datapoint at the specified offset.
1450 TODO Eventually these should be inlined.
1451 \param i - position(offset) in the underlying datastructure
1452 */
1453
1454 ESCRIPT_DLL_API
1455 DataTypes::ValueType::const_reference
1456 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1457
1458
1459 ESCRIPT_DLL_API
1460 DataTypes::ValueType::reference
1461 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1462
1463
1464
1465 /**
1466 \brief Create a buffer for use by getSample
1467 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1468 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1469
1470 In multi-threaded sections, this needs to be called on each thread.
1471
1472 \return A DataVector* if Data is lazy, NULL otherwise.
1473 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1474 */
1475 ESCRIPT_DLL_API
1476 DataTypes::ValueType*
1477 allocSampleBuffer() const;
1478
1479 /**
1480 \brief Free a buffer allocated with allocSampleBuffer.
1481 \param buffer Input - pointer to the buffer to deallocate.
1482 */
1483 ESCRIPT_DLL_API void freeSampleBuffer(DataTypes::ValueType* buffer);
1484
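  /**
     \brief
     Usage sketch: read-only sample access using a per-thread buffer, pairing
     allocSampleBuffer() with getSampleDataRO() and freeSampleBuffer(). A
     sketch only; "d" stands for an existing (possibly lazy) Data object.

     \code
     escript::DataTypes::ValueType* buf = d.allocSampleBuffer();   // non-NULL only for lazy data
     for (int s=0; s<d.getNumSamples(); ++s)
     {
         const escript::DataAbstract::ValueType::value_type* p = d.getSampleDataRO(s, buf);
         // ... read the sample's values through p ...
     }
     d.freeSampleBuffer(buf);   // release the buffer allocated above
     \endcode
  */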
1485 protected:
1486
1487 private:
1488
1489 double
1490 LsupWorker() const;
1491
1492 double
1493 supWorker() const;
1494
1495 double
1496 infWorker() const;
1497
1498 boost::python::object
1499 integrateWorker() const;
1500
1501 /**
1502 \brief
1503 Check *this and the right operand are compatible. Throws
1504 an exception if they aren't.
1505 \param right - Input - The right hand side.
1506 */
1507 inline
1508 void
1509 operandCheck(const Data& right) const
1510 {
1511 return m_data->operandCheck(*(right.m_data.get()));
1512 }
1513
1514 /**
1515 \brief
1516 Perform the specified reduction algorithm on every element of every data point in
1517 this Data object according to the given function and return the single value result.
1518 */
1519 template <class BinaryFunction>
1520 inline
1521 double
1522 algorithm(BinaryFunction operation,
1523 double initial_value) const;
1524
1525 /**
1526 \brief
1527 Reduce each data-point in this Data object using the given operation. Return a Data
1528 object with the same number of data-points, but with each data-point containing only
1529 one value - the result of the reduction operation on the corresponding data-point in
1530 this Data object
1531 */
1532 template <class BinaryFunction>
1533 inline
1534 Data
1535 dp_algorithm(BinaryFunction operation,
1536 double initial_value) const;
1537
1538 /**
1539 \brief
1540 Perform the given binary operation on all of the data's elements.
1541 The underlying type of the right hand side (right) determines the final
1542 type of *this after the operation. For example if the right hand side
1543 is expanded *this will be expanded if necessary.
1544 RHS is a Data object.
1545 */
1546 template <class BinaryFunction>
1547 inline
1548 void
1549 binaryOp(const Data& right,
1550 BinaryFunction operation);
1551
1552 /**
1553 \brief
1554 Convert the data type of the RHS to match this.
1555 \param right - Input - data type to match.
1556 */
1557 void
1558 typeMatchLeft(Data& right) const;
1559
1560 /**
1561 \brief
1562 Convert the data type of this to match the RHS.
1563 \param right - Input - data type to match.
1564 */
1565 void
1566 typeMatchRight(const Data& right);
1567
1568 /**
1569 \brief
1570 Construct a Data object of the appropriate type.
1571 */
1572
1573 void
1574 initialise(const DataTypes::ValueType& value,
1575 const DataTypes::ShapeType& shape,
1576 const FunctionSpace& what,
1577 bool expanded);
1578
1579 void
1580 initialise(const WrappedArray& value,
1581 const FunctionSpace& what,
1582 bool expanded);
1583
1584 //
1585 // flag to protect the data object against any update
1586 bool m_protected;
1587 mutable bool m_shared;
1588 bool m_lazy;
1589
1590 //
1591 // pointer to the actual data object
1592 // boost::shared_ptr<DataAbstract> m_data;
1593 DataAbstract_ptr m_data;
1594
1595 // If possible please use getReadyPtr instead.
1596 // But see warning below.
1597 const DataReady*
1598 getReady() const;
1599
1600 DataReady*
1601 getReady();
1602
1603
1604 // Be wary of using this for local operations since it (temporarily) increases reference count.
1605 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1606 // getReady() instead
1607 DataReady_ptr
1608 getReadyPtr();
1609
1610 const_DataReady_ptr
1611 getReadyPtr() const;
1612
1613
1614 /**
1615 \brief Update the Data's shared flag
1616 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1617 For internal use only.
1618 */
1619 void updateShareStatus(bool nowshared) const
1620 {
1621 m_shared=nowshared; // m_shared is mutable
1622 }
1623
1624 // In the isShared() method below:
1625 // A problem would occur if m_data (the address pointed to) were being modified
1626 // while the call m_data->is_shared is being executed.
1627 //
1628 // Q: So why do I think this code can be thread safe/correct?
1629 // A: We need to make some assumptions.
1630 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1631 // 2. We assume that no constructions or assignments which will share previously unshared
1632 // will occur while this call is executing. This is consistent with the way the Data class and the C interface are written.
1633 //
1634 // This means that the only transition we need to consider, is when a previously shared object is
1635 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1636 // In those cases the m_shared flag changes to false after m_data has completed changing.
1637 // For any threads executing before the flag switches they will assume the object is still shared.
1638 bool isShared() const
1639 {
1640 return m_shared;
1641 /* if (m_shared) return true;
1642 if (m_data->isShared())
1643 {
1644 updateShareStatus(true);
1645 return true;
1646 }
1647 return false;*/
1648 }
1649
1650 void forceResolve()
1651 {
1652 if (isLazy())
1653 {
1654 #pragma omp critical (SHARE_XW)
1655 {
1656 if (isLazy())
1657 {
1658 resolve();
1659 }
1660 }
1661 }
1662 }
1663
1664 /**
1665 \brief If another object is sharing our member data, make a copy to work with instead.
1666 This code should only be called from single threaded sections of code.
1667 */
1668 void exclusiveWrite()
1669 {
1670 // if (!m_data.unique())
1671 // {
1672 // DataAbstract* t=m_data->deepCopy();
1673 // // m_data=DataAbstract_ptr(t);
1674 // set_m_data(DataAbstract_ptr(t));
1675 // }
1676
1677 // #ifdef _OPENMP
1678 // if (omp_in_parallel())
1679 // {
1680 // *((int*)0)=17;
1681 // throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1682 // }
1683 // #endif
1684
1685 if (isShared() || isLazy())
1686 {
1687 #pragma omp critical (SHARE_XW)
1688 {
1689 if (isLazy())
1690 {
1691 resolve();
1692 }
1693 if (isShared())
1694 {
1695 DataAbstract* t=m_data->deepCopy();
1696 set_m_data(DataAbstract_ptr(t));
1697 }
1698
1699 } // end critical
1700 }
1701 }
1702
1703
1704
1705
1706
1707 /**
1708 \brief Modify the data abstract hosted by this Data object
1709 For internal use only.
1710 Passing a pointer to null is permitted (do this in the destructor)
1711 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1712 */
1713 void set_m_data(DataAbstract_ptr p);
1714
1715 friend class DataAbstract; // To allow calls to updateShareStatus
1716
1717 };
1718
1719 } // end namespace escript
1720
1721
1722 // No, this is not supposed to be at the top of the file
1723 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1724 // so that I can dynamic cast between them below.
1725 #include "DataReady.h"
1726 #include "DataLazy.h"
1727
1728 namespace escript
1729 {
1730
1731 inline
1732 const DataReady*
1733 Data::getReady() const
1734 {
1735 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1736 EsysAssert((dr!=0), "Error - casting to DataReady.");
1737 return dr;
1738 }
1739
1740 inline
1741 DataReady*
1742 Data::getReady()
1743 {
1744 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1745 EsysAssert((dr!=0), "Error - casting to DataReady.");
1746 return dr;
1747 }
1748
1749 // Be wary of using this for local operations since it (temporarily) increases reference count.
1750 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1751 // getReady() instead
1752 inline
1753 DataReady_ptr
1754 Data::getReadyPtr()
1755 {
1756 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1757 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1758 return dr;
1759 }
1760
1761
1762 inline
1763 const_DataReady_ptr
1764 Data::getReadyPtr() const
1765 {
1766 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1767 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1768 return dr;
1769 }
1770
1771 inline
1772 DataAbstract::ValueType::value_type*
1773 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1774 {
1775 // if (isLazy())
1776 // {
1777 // resolve();
1778 // }
1779 // exclusiveWrite();
1780 if (isLazy())
1781 {
1782 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1783 }
1784 return getReady()->getSampleDataRW(sampleNo);
1785 }
1786
1787 inline
1788 const DataAbstract::ValueType::value_type*
1789 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer)
1790 {
1791 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1792 if (l!=0)
1793 {
1794 size_t offset=0;
1795 if (buffer==NULL)
1796 {
1797 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1798 }
1799 const DataTypes::ValueType* res=l->resolveSample(*buffer,0,sampleNo,offset);
1800 return &((*res)[offset]);
1801 }
1802 return getReady()->getSampleDataRO(sampleNo);
1803 }
1804
1805
1806
1807 /**
1808 Modify a filename for MPI parallel output to multiple files
1809 */
1810 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1811
1812 /**
1813 Binary Data object operators.
1814 */
1815 inline double rpow(double x,double y)
1816 {
1817 return pow(y,x);
1818 }
1819
1820 /**
1821 \brief
1822 Operator+
1823 Takes two Data objects.
1824 */
1825 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1826
1827 /**
1828 \brief
1829 Operator-
1830 Takes two Data objects.
1831 */
1832 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1833
1834 /**
1835 \brief
1836 Operator*
1837 Takes two Data objects.
1838 */
1839 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1840
1841 /**
1842 \brief
1843 Operator/
1844 Takes two Data objects.
1845 */
1846 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
1847
1848 /**
1849 \brief
1850 Operator+
1851 Takes LHS Data object and RHS python::object.
1852 python::object must be convertible to Data type.
1853 */
1854 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1855
1856 /**
1857 \brief
1858 Operator-
1859 Takes LHS Data object and RHS python::object.
1860 python::object must be convertible to Data type.
1861 */
1862 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1863
1864 /**
1865 \brief
1866 Operator*
1867 Takes LHS Data object and RHS python::object.
1868 python::object must be convertible to Data type.
1869 */
1870 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1871
1872 /**
1873 \brief
1874 Operator/
1875 Takes LHS Data object and RHS python::object.
1876 python::object must be convertible to Data type.
1877 */
1878 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1879
1880 /**
1881 \brief
1882 Operator+
1883 Takes LHS python::object and RHS Data object.
1884 python::object must be convertible to Data type.
1885 */
1886 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1887
1888 /**
1889 \brief
1890 Operator-
1891 Takes LHS python::object and RHS Data object.
1892 python::object must be convertible to Data type.
1893 */
1894 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1895
1896 /**
1897 \brief
1898 Operator*
1899 Takes LHS python::object and RHS Data object.
1900 python::object must be convertible to Data type.
1901 */
1902 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1903
1904 /**
1905 \brief
1906 Operator/
1907 Takes LHS python::object and RHS Data object.
1908 python::object must be convertible to Data type.
1909 */
1910 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1911
1912
1913
1914 /**
1915 \brief
1916 Output operator
1917 */
1918 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1919
1920 /**
1921 \brief
1922 Compute a tensor product of two Data objects
1923 \param arg0 - Input - Data object
1924 \param arg1 - Input - Data object
1925 \param axis_offset - Input - axis offset
1926 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1927 */
1928 ESCRIPT_DLL_API
1929 Data
1930 C_GeneralTensorProduct(Data& arg0,
1931 Data& arg1,
1932 int axis_offset=0,
1933 int transpose=0);
1934
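/**
   \brief
   Usage sketch: generalised tensor product. A sketch only; "a" and "b"
   stand for Data objects whose data point shapes are suitable for
   contraction over one index.

   \code
   escript::Data c = escript::C_GeneralTensorProduct(a, b, 1, 0);   // contract over one axis, no transposition
   \endcode
*/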
1935 /**
1936 \brief
1937 Perform the given binary operation with this and right as operands.
1938 Right is a Data object.
1939 */
1940 template <class BinaryFunction>
1941 inline
1942 void
1943 Data::binaryOp(const Data& right,
1944 BinaryFunction operation)
1945 {
1946 //
1947 // updating a rank zero object from an object of higher rank is not supported
1948 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1949 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1950 }
1951
1952 if (isLazy() || right.isLazy())
1953 {
1954 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1955 }
1956 //
1957 // initially make the temporary a shallow copy
1958 Data tempRight(right);
1959
1960 if (getFunctionSpace()!=right.getFunctionSpace()) {
1961 if (right.probeInterpolation(getFunctionSpace())) {
1962 //
1963 // an interpolation is required so create a new Data
1964 tempRight=Data(right,this->getFunctionSpace());
1965 } else if (probeInterpolation(right.getFunctionSpace())) {
1966 //
1967 // interpolate onto the RHS function space
1968 Data tempLeft(*this,right.getFunctionSpace());
1969 // m_data=tempLeft.m_data;
1970 set_m_data(tempLeft.m_data);
1971 }
1972 }
1973 operandCheck(tempRight);
1974 //
1975 // ensure this has the right type for the RHS
1976 typeMatchRight(tempRight);
1977 //
1978 // Need to cast to the concrete types so that the correct binaryOp
1979 // is called.
1980 if (isExpanded()) {
1981 //
1982 // Expanded data will be done in parallel, the right hand side can be
1983 // of any data type
1984 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
1985 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
1986 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
1987 } else if (isTagged()) {
1988 //
1989 // Tagged data is operated on serially, the right hand side can be
1990 // either DataConstant or DataTagged
1991 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
1992 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
1993 if (right.isTagged()) {
1994 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
1995 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
1996 escript::binaryOp(*leftC,*rightC,operation);
1997 } else {
1998 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
1999 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2000 escript::binaryOp(*leftC,*rightC,operation);
2001 }
2002 } else if (isConstant()) {
2003 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2004 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2005 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2006 escript::binaryOp(*leftC,*rightC,operation);
2007 }
2008 }
2009
2010 /**
2011 \brief
2012 Perform the given Data object reduction algorithm on this and return the result.
2013 Given operation combines each element of each data point, thus argument
2014 object (*this) is a rank n Data object, and returned object is a scalar.
2015 Calls escript::algorithm.
2016 */
2017 template <class BinaryFunction>
2018 inline
2019 double
2020 Data::algorithm(BinaryFunction operation, double initial_value) const
2021 {
2022 if (isExpanded()) {
2023 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2024 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2025 return escript::algorithm(*leftC,operation,initial_value);
2026 } else if (isTagged()) {
2027 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2028 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2029 return escript::algorithm(*leftC,operation,initial_value);
2030 } else if (isConstant()) {
2031 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2032 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2033 return escript::algorithm(*leftC,operation,initial_value);
2034 } else if (isEmpty()) {
2035 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2036 } else if (isLazy()) {
2037 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2038 } else {
2039 throw DataException("Error - Data encapsulates an unknown type.");
2040 }
2041 }
2042
2043 /**
2044 \brief
2045 Perform the given data point reduction algorithm on data and return the result.
2046 Given operation combines each element within each data point into a scalar,
2047 thus argument object is a rank n Data object, and returned object is a
2048 rank 0 Data object.
2049 Calls escript::dp_algorithm.
2050 */
2051 template <class BinaryFunction>
2052 inline
2053 Data
2054 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2055 {
2056 if (isEmpty()) {
2057 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2058 }
2059 else if (isExpanded()) {
2060 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2061 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2062 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2063 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2064 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2065 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2066 return result;
2067 }
2068 else if (isTagged()) {
2069 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2070 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2071 DataTypes::ValueType defval(1);
2072 defval[0]=0;
2073 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2074 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2075 return Data(resultT); // note: the Data object now owns the resultT pointer
2076 }
2077 else if (isConstant()) {
2078 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2079 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2080 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2081 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2082 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2083 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2084 return result;
2085 } else if (isLazy()) {
2086 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2087 } else {
2088 throw DataException("Error - Data encapsulates an unknown type.");
2089 }
2090 }
2091
2092 /**
2093 \brief
2094 Compute a tensor operation with two Data objects
2095 \param arg0 - Input - Data object
2096 \param arg1 - Input - Data object
2097 \param operation - Input - Binary op functor
2098 */
2099 template <typename BinaryFunction>
2100 inline
2101 Data
2102 C_TensorBinaryOperation(Data const &arg_0,
2103 Data const &arg_1,
2104 BinaryFunction operation)
2105 {
2106 if (arg_0.isEmpty() || arg_1.isEmpty())
2107 {
2108 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2109 }
2110 if (arg_0.isLazy() || arg_1.isLazy())
2111 {
2112 throw DataException("Error - Operations not permitted on lazy data.");
2113 }
2114 // Interpolate if necessary and find an appropriate function space
2115 Data arg_0_Z, arg_1_Z;
2116 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2117 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2118 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2119 arg_1_Z = Data(arg_1);
2120 }
2121 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2122 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2123 arg_0_Z =Data(arg_0);
2124 }
2125 else {
2126 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2127 }
2128 } else {
2129 arg_0_Z = Data(arg_0);
2130 arg_1_Z = Data(arg_1);
2131 }
2132 // Get rank and shape of inputs
2133 int rank0 = arg_0_Z.getDataPointRank();
2134 int rank1 = arg_1_Z.getDataPointRank();
2135 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2136 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2137 int size0 = arg_0_Z.getDataPointSize();
2138 int size1 = arg_1_Z.getDataPointSize();
2139 // Declare output Data object
2140 Data res;
2141
2142 if (shape0 == shape1) {
2143 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2144 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2145 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2146 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2147 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2148
2149 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2150 }
2151 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2152
2153 // Prepare the DataConstant input
2154 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2155
2156 // Borrow DataTagged input from Data object
2157 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2158
2159 // Prepare a DataTagged output 2
2160 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2161 res.tag();
2162 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2163
2164 // Prepare offset into DataConstant
2165 int offset_0 = tmp_0->getPointOffset(0,0);
2166 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2167
2168 // Get the pointers to the actual data
2169 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2170 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2171
2172 // Compute a result for the default
2173 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2174 // Compute a result for each tag
2175 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2176 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2177 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2178 tmp_2->addTag(i->first);
2179 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2180 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2181
2182 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2183 }
2184
2185 }
2186 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2187 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2188 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2189 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2190 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2191
2192 int sampleNo_1,dataPointNo_1;
2193 int numSamples_1 = arg_1_Z.getNumSamples();
2194 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2195 int offset_0 = tmp_0->getPointOffset(0,0);
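      // make sure res is safe to write to before the parallel loop below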
2196 res.requireWrite();
2197 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2198 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2199 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2200 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2201 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2202 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2203 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2204 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2205 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2206 }
2207 }
2208
2209 }
2210 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2211 // Borrow DataTagged input from Data object
2212 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2213
2214 // Prepare the DataConstant input
2215 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2216
2217 // Prepare a DataTagged output (tmp_2)
2218 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2219 res.tag();
2220 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2221
2222 // Prepare offset into DataConstant
2223 int offset_1 = tmp_1->getPointOffset(0,0);
2224
2225 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2226 // Get the pointers to the actual data
2227 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2228 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2229 // Compute a result for the default
2230 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2231 // Compute a result for each tag
2232 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2233 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2234 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2235 tmp_2->addTag(i->first);
2236 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2237 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2238 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2239 }
2240
2241 }
2242 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2243 // Borrow DataTagged input from Data object
2244 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2245
2246 // Borrow DataTagged input from Data object
2247 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2248
2249 // Prepare a DataTagged output (tmp_2)
2250 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2251 res.tag(); // DataTagged output
2252 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2253
2254 // Get the pointers to the actual data
2255 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2256 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2257 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2258
2259 // Compute a result for the default
2260 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2261 // Merge the tags
2262 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2263 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2264 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2265 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2266 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2267 }
2268 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2269 tmp_2->addTag(i->first);
2270 }
2271 // Compute a result for each tag
2272 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2273 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2274
2275 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2276 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2277 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2278
2279 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2280 }
2281
2282 }
2283 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2284 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2285 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2286 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2287 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2288 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2289
2290 int sampleNo_0,dataPointNo_0;
2291 int numSamples_0 = arg_0_Z.getNumSamples();
2292 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2293 res.requireWrite();
2294 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2295 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2296 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2297 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2298 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2299 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2300 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2301 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2302 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2303 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2304 }
2305 }
2306
2307 }
2308 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2309 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2310 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2311 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2312 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2313
2314 int sampleNo_0,dataPointNo_0;
2315 int numSamples_0 = arg_0_Z.getNumSamples();
2316 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2317 int offset_1 = tmp_1->getPointOffset(0,0);
2318 res.requireWrite();
2319 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2320 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2321 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2322 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2323 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2324
2325 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2326 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2327 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2328
2329
2330 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2331 }
2332 }
2333
2334 }
2335 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2336 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2337 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2338 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2339 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2340 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2341
2342 int sampleNo_0,dataPointNo_0;
2343 int numSamples_0 = arg_0_Z.getNumSamples();
2344 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2345 res.requireWrite();
2346 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2347 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2348 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2349 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2350 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2351 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2352 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2353 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2354 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2355 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2356 }
2357 }
2358
2359 }
2360 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2361 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2362 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2363 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2364 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2365 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2366
2367 int sampleNo_0,dataPointNo_0;
2368 int numSamples_0 = arg_0_Z.getNumSamples();
2369 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2370 res.requireWrite();
2371 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2372 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2373 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2374 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2375 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2376 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2377 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2378 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2379 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2380 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2381 }
2382 }
2383
2384 }
2385 else {
2386 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2387 }
2388
2389 } else if (0 == rank0) {
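    // arg_0 is a scalar (rank 0): its single value is broadcast against every
    // component of arg_1's data points, so the result has shape1.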
2390 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2391 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2392 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2393 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2394 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2395 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2396 }
2397 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2398
2399 // Prepare the DataConstant input
2400 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2401
2402 // Borrow DataTagged input from Data object
2403 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2404
2405 // Prepare a DataTagged output (tmp_2)
2406 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2407 res.tag();
2408 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2409
2410 // Prepare offset into DataConstant
2411 int offset_0 = tmp_0->getPointOffset(0,0);
2412 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2413
2414 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2415 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2416
2417 // Compute a result for the default
2418 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2419 // Compute a result for each tag
2420 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2421 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2422 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2423 tmp_2->addTag(i->first);
2424 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2425 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2426 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2427 }
2428
2429 }
2430 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2431
2432 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2433 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2434 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2435 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2436
2437 int sampleNo_1,dataPointNo_1;
2438 int numSamples_1 = arg_1_Z.getNumSamples();
2439 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2440 int offset_0 = tmp_0->getPointOffset(0,0);
2441 res.requireWrite();
2442 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2443 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2444 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2445 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2446 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2447 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2448 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2449 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2450 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2451
2452 }
2453 }
2454
2455 }
2456 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2457
2458 // Borrow DataTagged input from Data object
2459 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2460
2461 // Prepare the DataConstant input
2462 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2463
2464 // Prepare a DataTagged output (tmp_2)
2465 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2466 res.tag();
2467 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2468
2469 // Prepare offset into DataConstant
2470 int offset_1 = tmp_1->getPointOffset(0,0);
2471 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2472
2473 // Get the pointers to the actual data
2474 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2475 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2476
2477
2478 // Compute a result for the default
2479 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2480 // Compute a result for each tag
2481 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2482 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2483 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2484 tmp_2->addTag(i->first);
2485 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2486 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2487
2488 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2489 }
2490
2491 }
2492 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2493
2494 // Borrow DataTagged input from Data object
2495 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2496
2497 // Borrow DataTagged input from Data object
2498 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2499
2500 // Prepare a DataTagged output (tmp_2)
2501 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2502 res.tag(); // DataTagged output
2503 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2504
2505 // Get the pointers to the actual data
2506 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2507 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2508 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2509
2510 // Compute a result for the default
2511 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2512 // Merge the tags
2513 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2514 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2515 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2516 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2517 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2518 }
2519 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2520 tmp_2->addTag(i->first);
2521 }
2522 // Compute a result for each tag
2523 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2524 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2525
2526 /* DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2527 DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2528 DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2529 double *ptr_0 = &view_0.getData(0);
2530 double *ptr_1 = &view_1.getData(0);
2531 double *ptr_2 = &view_2.getData(0);*/
2532
2533 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2534 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2535 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2536
2537 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2538 }
2539
2540 }
2541 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2542
2543 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2544 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2545 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2546 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2547 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2548
2549 int sampleNo_0,dataPointNo_0;
2550 int numSamples_0 = arg_0_Z.getNumSamples();
2551 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2552 res.requireWrite();
2553 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2554 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2555 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2556 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2557 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2558 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2559 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2560 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2561 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2562 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2563 }
2564 }
2565
2566 }
2567 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2568 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2569 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2570 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2571 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2572
2573 int sampleNo_0,dataPointNo_0;
2574 int numSamples_0 = arg_0_Z.getNumSamples();
2575 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2576 int offset_1 = tmp_1->getPointOffset(0,0);
2577 res.requireWrite();
2578 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2579 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2580 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2581 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2582 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2583 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2584 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2585 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2586 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2587 }
2588 }
2589
2590
2591 }
2592 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2593
2594 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2595 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2596 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2597 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2598 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2599
2600 int sampleNo_0,dataPointNo_0;
2601 int numSamples_0 = arg_0_Z.getNumSamples();
2602 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2603 res.requireWrite();
2604 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2605 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2606 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2607 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2608 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2609 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2610 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2611 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2612 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2613 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2614 }
2615 }
2616
2617 }
2618 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2619
2620 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2621 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2622 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2623 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2624 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2625
2626 int sampleNo_0,dataPointNo_0;
2627 int numSamples_0 = arg_0_Z.getNumSamples();
2628 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2629 res.requireWrite();
2630 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2631 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2632 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2633 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2634 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2635 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2636 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2637 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2638 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2639 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2640 }
2641 }
2642
2643 }
2644 else {
2645 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2646 }
2647
2648 } else if (0 == rank1) {
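    // arg_1 is a scalar (rank 0): its single value is broadcast against every
    // component of arg_0's data points, so the result has shape0.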
2649 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2650 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2651 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2652 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2653 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2654 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2655 }
2656 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2657
2658 // Prepare the DataConstant input
2659 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2660
2661 // Borrow DataTagged input from Data object
2662 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2663
2664 // Prepare a DataTagged output (tmp_2)
2665 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2666 res.tag();
2667 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2668
2669 // Prepare offset into DataConstant
2670 int offset_0 = tmp_0->getPointOffset(0,0);
2671 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2672
2673 // Get the pointers to the actual data
2674 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2675 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2676
2677 // Compute a result for the default
2678 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2679 // Compute a result for each tag
2680 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2681 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2682 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2683 tmp_2->addTag(i->first);
2684 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2685 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2686 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2687 }
2688 }
2689 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2690
2691 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2692 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2693 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2694 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2695
2696 int sampleNo_1,dataPointNo_1;
2697 int numSamples_1 = arg_1_Z.getNumSamples();
2698 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2699 int offset_0 = tmp_0->getPointOffset(0,0);
2700 res.requireWrite();
2701 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2702 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2703 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2704 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2705 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2706 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2707 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2708 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2709 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2710 }
2711 }
2712
2713 }
2714 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2715
2716 // Borrow DataTagged input from Data object
2717 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2718
2719 // Prepare the DataConstant input
2720 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2721
2722 // Prepare a DataTagged output (tmp_2)
2723 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2724 res.tag();
2725 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2726
2727 // Prepare offset into DataConstant
2728 int offset_1 = tmp_1->getPointOffset(0,0);
2729 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2730 // Get the pointers to the actual data
2731 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2732 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2733 // Compute a result for the default
2734 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2735 // Compute a result for each tag
2736 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2737 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2738 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2739 tmp_2->addTag(i->first);
2740 /* DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2741 DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2742 double *ptr_0 = &view_0.getData(0);
2743 double *ptr_2 = &view_2.getData(0);*/
2744 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2745 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2746 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2747 }
2748
2749 }
2750 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2751
2752 // Borrow DataTagged input from Data object
2753 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2754
2755 // Borrow DataTagged input from Data object
2756 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2757
2758 // Prepare a DataTagged output (tmp_2)
2759 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2760 res.tag(); // DataTagged output
2761 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2762
2763 // Get the pointers to the actual data
2764 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2765 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2766 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2767
2768 // Compute a result for the default
2769 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2770 // Merge the tags
2771 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2772 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2773 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2774 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2775 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2776 }
2777 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2778 tmp_2->addTag(i->first);
2779 }
2780 // Compute a result for each tag
2781 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2782 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2783 // DataArrayView view_0 = tmp_0->getDataPointByTag(i->first);
2784 // DataArrayView view_1 = tmp_1->getDataPointByTag(i->first);
2785 // DataArrayView view_2 = tmp_2->getDataPointByTag(i->first);
2786 // double *ptr_0 = &view_0.getData(0);
2787 // double *ptr_1 = &view_1.getData(0);
2788 // double *ptr_2 = &view_2.getData(0);
2789
2790 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2791 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2792 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2793 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2794 }
2795
2796 }
2797 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2798
2799 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2800 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2801 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2802 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2803 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2804
2805 int sampleNo_0,dataPointNo_0;
2806 int numSamples_0 = arg_0_Z.getNumSamples();
2807 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2808 res.requireWrite();
2809 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2810 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2811 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2812 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2813 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2814 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2815 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2816 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2817 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2818 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2819 }
2820 }
2821
2822 }
2823 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2824 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2825 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2826 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2827 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2828
2829 int sampleNo_0,dataPointNo_0;
2830 int numSamples_0 = arg_0_Z.getNumSamples();
2831 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2832 int offset_1 = tmp_1->getPointOffset(0,0);
2833 res.requireWrite();
2834 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2835 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2836 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2837 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2838 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2839 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2840 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2841 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2842 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2843 }
2844 }
2845
2846
2847 }
2848 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2849
2850 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2851 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2852 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2853 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2854 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2855
2856 int sampleNo_0,dataPointNo_0;
2857 int numSamples_0 = arg_0_Z.getNumSamples();
2858 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2859 res.requireWrite();
2860 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2861 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2862 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2863 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2864 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2865 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2866 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2867 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2868 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2869 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2870 }
2871 }
2872
2873 }
2874 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2875
2876 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2877 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2878 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2879 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2880 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2881
2882 int sampleNo_0,dataPointNo_0;
2883 int numSamples_0 = arg_0_Z.getNumSamples();
2884 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2885 res.requireWrite();
2886 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2887 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2888 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2889 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2890 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2891 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2892 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2893 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2894 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2895 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2896 }
2897 }
2898
2899 }
2900 else {
2901 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2902 }
2903
2904 } else {
2905 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2906 }
2907
2908 return res;
2909 }
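// Illustrative sketch only (not part of the interface): combining two Data
// objects with a standard binary functor from <functional>. The
// default-constructed FunctionSpace is a placeholder; in practice it would
// come from a domain.
//   Data a(1.0, DataTypes::ShapeType(), FunctionSpace());
//   Data b(2.0, DataTypes::ShapeType(), FunctionSpace());
//   Data c = C_TensorBinaryOperation(a, b, std::plus<double>());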
2910
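/**
  \brief
  Apply the given unary operation to every data point of arg_0 and return the
  result. The storage type of the argument (constant, tagged or expanded) is
  preserved; empty or lazy arguments cause a DataException to be thrown.
*/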
2911 template <typename UnaryFunction>
2912 Data
2913 C_TensorUnaryOperation(Data const &arg_0,
2914 UnaryFunction operation)
2915 {
2916 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2917 {
2918 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2919 }
2920 if (arg_0.isLazy())
2921 {
2922 throw DataException("Error - Operations not permitted on lazy data.");
2923 }
2924 // Interpolate if necessary and find an appropriate function space
2925 Data arg_0_Z = Data(arg_0);
2926
2927 // Get rank and shape of inputs
2928 // int rank0 = arg_0_Z.getDataPointRank();
2929 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2930 int size0 = arg_0_Z.getDataPointSize();
2931
2932 // Declare output Data object
2933 Data res;
2934
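  // The branches below mirror the storage type of the input (DataConstant,
  // DataTagged or DataExpanded) in the output and apply the operation pointwise.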
2935 if (arg_0_Z.isConstant()) {
2936 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2937 // double *ptr_0 = &((arg_0_Z.getPointDataView().getData())[0]);
2938 // double *ptr_2 = &((res.getPointDataView().getData())[0]);
2939 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2940 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2941 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2942 }
2943 else if (arg_0_Z.isTagged()) {
2944
2945 // Borrow DataTagged input from Data object
2946 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2947
2948 // Prepare a DataTagged output (tmp_2)
2949 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2950 res.tag();
2951 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2952
2953 // Get the pointers to the actual data
2954 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2955 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2956 // Compute a result for the default
2957 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2958 // Compute a result for each tag
2959 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2960 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2961 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2962 tmp_2->addTag(i->first);
2963 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2964 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2965 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2966 }
2967
2968 }
2969 else if (arg_0_Z.isExpanded()) {
2970
2971 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2972 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2973 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2974
2975 int sampleNo_0,dataPointNo_0;
2976 int numSamples_0 = arg_0_Z.getNumSamples();
2977 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2978 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2979 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2980 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2981 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2982 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2983 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2984 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2985 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2986 }
2987 }
2988 }
2989 else {
2990 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
2991 }
2992
2993 return res;
2994 }
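// Illustrative sketch only: applying a plain double(double) function (here
// ::fabs from <cmath>) pointwise; the FunctionSpace is again a placeholder.
//   double (*f)(double) = ::fabs;
//   Data a(-0.5, DataTypes::ShapeType(), FunctionSpace());
//   Data r = C_TensorUnaryOperation(a, f);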
2995
2996 }
2997 #endif
