/[escript]/branches/arrexp_2137_win_merge/escript/src/Data.h


Revision 2225
Wed Jan 21 05:30:12 2009 UTC by jfenwick
File MIME type: text/plain
File size: 91457 byte(s)
And again.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2008 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /** \file Data.h */
16
17 #ifndef DATA_H
18 #define DATA_H
19 #include "system_dep.h"
20
21 #include "DataTypes.h"
22 #include "DataAbstract.h"
23 #include "DataAlgorithm.h"
24 #include "FunctionSpace.h"
25 #include "BinaryOp.h"
26 #include "UnaryOp.h"
27 #include "DataException.h"
28
29
30 extern "C" {
31 #include "DataC.h"
32 #include <omp.h>
33 }
34
35 #include "esysmpi.h"
36 #include <string>
37 #include <algorithm>
38 #include <sstream>
39
40 #include <boost/shared_ptr.hpp>
41 #include <boost/python/object.hpp>
42 #include <boost/python/tuple.hpp>
43 #include <boost/python/numeric.hpp>
44
45 namespace escript {
46
47 //
48 // Forward declaration for various implementations of Data.
49 class DataConstant;
50 class DataTagged;
51 class DataExpanded;
52 class DataLazy;
53
54 /**
55 \brief
56 Data represents a collection of datapoints.
57
58 Description:
59 Internally, the datapoints are actually stored by a DataAbstract object.
60 The specific instance of DataAbstract used may vary over the lifetime
61 of the Data object.
62 Some methods on this class return references (eg getShape()).
63 These references should not be used after an operation which changes the underlying DataAbstract object.
64 Doing so will lead to invalid memory access.
65 This should not affect any methods exposed via boost::python.
66 */
67 class Data {
68
69 public:
70
71 // These typedefs allow function names to be cast to pointers
72 // to functions of the appropriate type when calling unaryOp etc.
73 typedef double (*UnaryDFunPtr)(double);
74 typedef double (*BinaryDFunPtr)(double,double);
75
76
77 /**
78 Constructors.
79 */
80
81 /**
82 \brief
83 Default constructor.
84 Creates a DataEmpty object.
85 */
86 ESCRIPT_DLL_API
87 Data();
88
89 /**
90 \brief
91 Copy constructor.
92 WARNING: Only performs a shallow copy.
93 */
94 ESCRIPT_DLL_API
95 Data(const Data& inData);
96
97 /**
98 \brief
99 Constructor from another Data object. If "what" differs from the
100 function space of inData, an attempt is made to interpolate inData onto what;
101 otherwise a shallow copy of inData is returned.
102 */
103 ESCRIPT_DLL_API
104 Data(const Data& inData,
105 const FunctionSpace& what);
106
107 /**
108 \brief Copy Data from an existing vector
109 */
110
111 ESCRIPT_DLL_API
112 Data(const DataTypes::ValueType& value,
113 const DataTypes::ShapeType& shape,
114 const FunctionSpace& what=FunctionSpace(),
115 bool expanded=false);
116
117 /**
118 \brief
119 Constructor which creates a Data object with the given data point shape, applying a single value to every data point.
120
121 \param value - Input - Single value applied to all Data.
122 \param dataPointShape - Input - The shape of each data point.
123 \param what - Input - A description of what this data represents.
124 \param expanded - Input - Flag, if true fill the entire container with
125 the given value. Otherwise a more efficient storage
126 mechanism will be used.
127 */
128 ESCRIPT_DLL_API
129 Data(double value,
130 const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
131 const FunctionSpace& what=FunctionSpace(),
132 bool expanded=false);
133
134 /**
135 \brief
136 Constructor which performs a deep copy of a region from another Data object.
137
138 \param inData - Input - Input Data object.
139 \param region - Input - Region to copy.
140 */
141 ESCRIPT_DLL_API
142 Data(const Data& inData,
143 const DataTypes::RegionType& region);
144
145 /**
146 \brief
147 Constructor which copies data from any object that can be converted into
148 a python numarray.
149
150 \param value - Input - Input data.
151 \param what - Input - A description of what this data represents.
152 \param expanded - Input - Flag, if true fill the entire container with
153 the value. Otherwise a more efficient storage
154 mechanism will be used.
155 */
156 ESCRIPT_DLL_API
157 Data(const boost::python::object& value,
158 const FunctionSpace& what=FunctionSpace(),
159 bool expanded=false);
160
161 /**
162 \brief
163 Constructor which creates a DataConstant.
164 Copies data from any object that can be converted
165 into a numarray. All other parameters are copied from other.
166
167 \param value - Input - Input data.
168 \param other - Input - contains all other parameters.
169 */
170 ESCRIPT_DLL_API
171 Data(const boost::python::object& value,
172 const Data& other);
173
174 /**
175 \brief
176 Constructor which creates a DataConstant of "shape" with constant value.
177 */
178 ESCRIPT_DLL_API
179 Data(double value,
180 const boost::python::tuple& shape=boost::python::make_tuple(),
181 const FunctionSpace& what=FunctionSpace(),
182 bool expanded=false);
183
184
185
186 /**
187 \brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
188 Once you have passed the pointer, do not delete it.
189 */
190 ESCRIPT_DLL_API
191 explicit Data(DataAbstract* underlyingdata);
192
193 /**
194 \brief Create a Data based on the supplied DataAbstract
195 */
196 ESCRIPT_DLL_API
197 explicit Data(DataAbstract_ptr underlyingdata);
198
199 /**
200 \brief
201 Destructor
202 */
203 ESCRIPT_DLL_API
204 ~Data();
205
206 /**
207 \brief Make this object a deep copy of "other".
208 */
209 ESCRIPT_DLL_API
210 void
211 copy(const Data& other);
212
213 /**
214 \brief Return a pointer to a deep copy of this object.
215 */
216 ESCRIPT_DLL_API
217 Data
218 copySelf();
219
220
221 /**
222 \brief produce a delayed evaluation version of this Data.
223 */
224 ESCRIPT_DLL_API
225 Data
226 delay();
227
228 /**
229 \brief convert the current data into lazy data.
230 */
231 ESCRIPT_DLL_API
232 void
233 delaySelf();
234
235
236 /**
237 Member access methods.
238 */
239
240 /**
241 \brief
242 switches on update protection
243
244 */
245 ESCRIPT_DLL_API
246 void
247 setProtection();
248
249 /**
250 \brief
251 Returns true if the data object is protected against update.
252
253 */
254 ESCRIPT_DLL_API
255 bool
256 isProtected() const;
257
258 /**
259 \brief
260 Return the values of a data point on this process
261 */
262 ESCRIPT_DLL_API
263 const boost::python::numeric::array
264 getValueOfDataPoint(int dataPointNo);
265
266 ESCRIPT_DLL_API
267 const boost::python::object
268 getValueOfDataPointAsTuple(int dataPointNo);
269
270 /**
271 \brief
272 sets the values of a data-point from a python object on this process
273 */
274 ESCRIPT_DLL_API
275 void
276 setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
277
278 /**
279 \brief
280 sets the values of a data-point from a numarray object on this process
281 */
282 ESCRIPT_DLL_API
283 void
284 setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
285
286 /**
287 \brief
288 sets the values of a data-point on this process
289 */
290 ESCRIPT_DLL_API
291 void
292 setValueOfDataPoint(int dataPointNo, const double);
293
294 /**
295 \brief
296 Return the value of the specified data-point across all processors
297 */
298 ESCRIPT_DLL_API
299 const boost::python::numeric::array
300 getValueOfGlobalDataPoint(int procNo, int dataPointNo);
301
302 ESCRIPT_DLL_API
303 const boost::python::object
304 getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
305
306 /**
307 \brief
308 Return the tag number associated with the given data-point.
309
310 */
311 ESCRIPT_DLL_API
312 int
313 getTagNumber(int dpno);
314
315 /**
316 \brief
317 Return the C wrapper for the Data object.
318 */
319 ESCRIPT_DLL_API
320 escriptDataC
321 getDataC();
322
323
324
325 /**
326 \brief
327 Return the C wrapper for the Data object - const version.
328 */
329 ESCRIPT_DLL_API
330 escriptDataC
331 getDataC() const;
332
333 /**
334 \brief How much space is required to evaluate a sample of the Data.
335 */
336 ESCRIPT_DLL_API
337 size_t
338 getSampleBufferSize() const;
339
340
341
342 /**
343 \brief
344 Write the data as a string. For large amounts of data, a summary is printed.
345 */
346 ESCRIPT_DLL_API
347 std::string
348 toString() const;
349
350 /**
351 \brief
352 Whatever the current Data type make this into a DataExpanded.
353 */
354 ESCRIPT_DLL_API
355 void
356 expand();
357
358 /**
359 \brief
360 If possible convert this Data to DataTagged. This will only allow
361 Constant data to be converted to tagged. An attempt to convert
362 Expanded data to tagged will throw an exception.
363 */
364 ESCRIPT_DLL_API
365 void
366 tag();
367
368 /**
369 \brief If this data is lazy, then convert it to ready data.
370 What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
371 */
372 ESCRIPT_DLL_API
373 void
374 resolve();
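// Illustrative sketch (not part of the original header) of the lazy-evaluation
// workflow described above. "a" and "b" are assumed, already-initialised Data objects.
//
//   a.delaySelf();          // a now holds a DataLazy expression
//   Data c = a + b;         // further operations may remain lazy
//   c.resolve();            // force evaluation into ready data
//                           // (e.g. Constant+Tagged==Tagged)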
375
376
377 /**
378 \brief Ensures data is ready for write access.
379 This means that the data will be resolved if lazy and will be copied if shared with another Data object.
380 \warning This method should only be called in single threaded sections of code. (It modifies m_data).
381 Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
382 Doing so might introduce additional sharing.
383 */
384 ESCRIPT_DLL_API
385 void
386 requireWrite();
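// Illustrative sketch (not part of the original header) of the intended
// write-access pattern, assuming "d" is a Data object:
//
//   d.requireWrite();                       // single-threaded: resolve + unshare
//   #pragma omp parallel for
//   for (int s = 0; s < d.getNumSamples(); ++s)
//   {
//       double* p = d.getSampleDataRW(s);   // safe once requireWrite() has been called
//       // ... modify the sample values through p ...
//   }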
387
388 /**
389 \brief
390 Return true if this Data is expanded.
391 \note To determine whether a sample will contain separate values for each datapoint, use actsExpanded instead.
392 */
393 ESCRIPT_DLL_API
394 bool
395 isExpanded() const;
396
397 /**
398 \brief
399 Return true if this Data is expanded or resolves to expanded.
400 That is, if it has a separate value for each datapoint in the sample.
401 */
402 ESCRIPT_DLL_API
403 bool
404 actsExpanded() const;
405
406
407 /**
408 \brief
409 Return true if this Data is tagged.
410 */
411 ESCRIPT_DLL_API
412 bool
413 isTagged() const;
414
415 /**
416 \brief
417 Return true if this Data is constant.
418 */
419 ESCRIPT_DLL_API
420 bool
421 isConstant() const;
422
423 /**
424 \brief Return true if this Data is lazy.
425 */
426 ESCRIPT_DLL_API
427 bool
428 isLazy() const;
429
430 /**
431 \brief Return true if this data is ready.
432 */
433 ESCRIPT_DLL_API
434 bool
435 isReady() const;
436
437 /**
438 \brief
439 Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
440 contains datapoints.
441 */
442 ESCRIPT_DLL_API
443 bool
444 isEmpty() const;
445
446 /**
447 \brief
448 Return the function space.
449 */
450 ESCRIPT_DLL_API
451 inline
452 const FunctionSpace&
453 getFunctionSpace() const
454 {
455 return m_data->getFunctionSpace();
456 }
457
458 /**
459 \brief
460 Return a copy of the function space.
461 */
462 ESCRIPT_DLL_API
463 const FunctionSpace
464 getCopyOfFunctionSpace() const;
465
466 /**
467 \brief
468 Return the domain.
469 */
470 ESCRIPT_DLL_API
471 inline
472 // const AbstractDomain&
473 const_Domain_ptr
474 getDomain() const
475 {
476 return getFunctionSpace().getDomain();
477 }
478
479
480 /**
481 \brief
482 Return the domain.
483 TODO: For internal use only. This should be removed.
484 */
485 ESCRIPT_DLL_API
486 inline
487 // const AbstractDomain&
488 Domain_ptr
489 getDomainPython() const
490 {
491 return getFunctionSpace().getDomainPython();
492 }
493
494 /**
495 \brief
496 Return a copy of the domain.
497 */
498 ESCRIPT_DLL_API
499 const AbstractDomain
500 getCopyOfDomain() const;
501
502 /**
503 \brief
504 Return the rank of the point data.
505 */
506 ESCRIPT_DLL_API
507 inline
508 unsigned int
509 getDataPointRank() const
510 {
511 return m_data->getRank();
512 }
513
514 /**
515 \brief
516 Return the number of data points
517 */
518 ESCRIPT_DLL_API
519 inline
520 int
521 getNumDataPoints() const
522 {
523 return getNumSamples() * getNumDataPointsPerSample();
524 }
525 /**
526 \brief
527 Return the number of samples.
528 */
529 ESCRIPT_DLL_API
530 inline
531 int
532 getNumSamples() const
533 {
534 return m_data->getNumSamples();
535 }
536
537 /**
538 \brief
539 Return the number of data points per sample.
540 */
541 ESCRIPT_DLL_API
542 inline
543 int
544 getNumDataPointsPerSample() const
545 {
546 return m_data->getNumDPPSample();
547 }
548
549
550 /**
551 \brief
552 Return the number of values in the shape for this object.
553 */
554 ESCRIPT_DLL_API
555 int
556 getNoValues() const
557 {
558 return m_data->getNoValues();
559 }
560
561
562 /**
563 \brief
564 dumps the object into a netCDF file
565 */
566 ESCRIPT_DLL_API
567 void
568 dump(const std::string fileName) const;
569
570 /**
571 \brief
572 Return the sample data for the given sample no. This is not the
573 preferred interface but is provided for use by C code.
574 The buffer parameter is only required for LazyData.
575 \param sampleNo - Input - the given sample no.
576 \param buffer - Vector to compute (and store) sample data in.
577 \return pointer to the sample data.
578 */
579 ESCRIPT_DLL_API
580 inline
581 const DataAbstract::ValueType::value_type*
582 getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer=0);
583
584 /**
585 \brief
586 Return the sample data for the given sample no. This is not the
587 preferred interface but is provided for use by C code.
588 \param sampleNo - Input - the given sample no.
589 \return pointer to the sample data.
590 */
591 ESCRIPT_DLL_API
592 inline
593 DataAbstract::ValueType::value_type*
594 getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
595
596
597 /**
598 \brief
599 Return the sample data for the given tag. If an attempt is made to
600 access data that isn't tagged an exception will be thrown.
601 \param tag - Input - the tag key.
602 */
603 ESCRIPT_DLL_API
604 inline
605 DataAbstract::ValueType::value_type*
606 getSampleDataByTag(int tag)
607 {
608 return m_data->getSampleDataByTag(tag);
609 }
610
611 /**
612 \brief
613 Return a view into the data for the data point specified.
614 NOTE: Construction of the DataArrayView is a relatively expensive
615 operation.
616 \param sampleNo - Input -
617 \param dataPointNo - Input -
618 */
619 ESCRIPT_DLL_API
620 DataTypes::ValueType::const_reference
621 getDataPointRO(int sampleNo, int dataPointNo);
622
623
624 ESCRIPT_DLL_API
625 DataTypes::ValueType::reference
626 getDataPointRW(int sampleNo, int dataPointNo);
627
628
629
630 /**
631 \brief
632 Return the offset for the given sample and point within the sample
633 */
634 ESCRIPT_DLL_API
635 inline
636 DataTypes::ValueType::size_type
637 getDataOffset(int sampleNo,
638 int dataPointNo)
639 {
640 return m_data->getPointOffset(sampleNo,dataPointNo);
641 }
642
643 /**
644 \brief
645 Return a reference to the data point shape.
646 */
647 ESCRIPT_DLL_API
648 inline
649 const DataTypes::ShapeType&
650 getDataPointShape() const
651 {
652 return m_data->getShape();
653 }
654
655 /**
656 \brief
657 Return the data point shape as a tuple of integers.
658 */
659 ESCRIPT_DLL_API
660 const boost::python::tuple
661 getShapeTuple() const;
662
663 /**
664 \brief
665 Return the size of the data point. It is the product of the
666 data point shape dimensions.
667 */
668 ESCRIPT_DLL_API
669 int
670 getDataPointSize() const;
671
672 /**
673 \brief
674 Return the number of doubles stored for this Data.
675 */
676 ESCRIPT_DLL_API
677 DataTypes::ValueType::size_type
678 getLength() const;
679
680
681
682 /**
683 \brief
684 Assign the given value to the tag associated with name. Implicitly converts this
685 object to type DataTagged. Throws an exception if this object
686 cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
687 \param name - Input - Name of the tag.
688 \param value - Input - Value to associate with the tag.
689 *
690 */
691 ESCRIPT_DLL_API
692 void
693 setTaggedValueByName(std::string name,
694 const boost::python::object& value);
695
696 /**
697 \brief
698 Assign the given value to the tag. Implicitly converts this
699 object to type DataTagged if it is constant.
700
701 \param tagKey - Input - Integer key.
702 \param value - Input - Value to associate with given key.
703 *
704 */
705 ESCRIPT_DLL_API
706 void
707 setTaggedValue(int tagKey,
708 const boost::python::object& value);
709
710 /**
711 \brief
712 Assign the given value to the tag. Implicitly converts this
713 object to type DataTagged if it is constant.
714
715 \param tagKey - Input - Integer key.
716 \param pointshape - Input - The shape of the value parameter
717 \param value - Input - Value to associate with given key.
718 \param dataOffset - Input - Offset of the beginning of the point within the value parameter
719 */
720 ESCRIPT_DLL_API
721 void
722 setTaggedValueFromCPP(int tagKey,
723 const DataTypes::ShapeType& pointshape,
724 const DataTypes::ValueType& value,
725 int dataOffset=0);
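// Illustrative sketch (not part of the original header): assigning a tagged
// value from C++. The tag key 1, the value 42.0 and the FunctionSpace "fs" are
// assumptions for the example.
//
//   Data d(0.0, DataTypes::ShapeType(), fs, false);  // scalar data points
//   d.tag();                                         // convert to DataTagged
//   DataTypes::ShapeType pointshape;                 // scalar point shape
//   DataTypes::ValueType value(1);
//   value[0] = 42.0;
//   d.setTaggedValueFromCPP(1, pointshape, value);   // samples tagged 1 now read 42.0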
726
727
728
729 /**
730 \brief
731 Copy other Data object into this Data object where mask is positive.
732 */
733 ESCRIPT_DLL_API
734 void
735 copyWithMask(const Data& other,
736 const Data& mask);
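// Illustrative sketch (not part of the original header): combining
// wherePositive() with copyWithMask(). "target", "source" and "level" are
// assumed Data objects with compatible shapes and function spaces.
//
//   Data mask = level.wherePositive();   // 1 where level > 0, else 0
//   target.copyWithMask(source, mask);   // copy source into target where mask is positive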
737
738 /**
739 Data object operation methods and operators.
740 */
741
742 /**
743 \brief
744 set all values to zero
745 *
746 */
747 ESCRIPT_DLL_API
748 void
749 setToZero();
750
751 /**
752 \brief
753 Interpolates this onto the given functionspace and returns
754 the result as a Data object.
755 *
756 */
757 ESCRIPT_DLL_API
758 Data
759 interpolate(const FunctionSpace& functionspace) const;
760 /**
761 \brief
762 Calculates the gradient of the data at the data points of functionspace.
763 If functionspace is not present the function space of Function(getDomain()) is used.
764 *
765 */
766 ESCRIPT_DLL_API
767 Data
768 gradOn(const FunctionSpace& functionspace) const;
769
770 ESCRIPT_DLL_API
771 Data
772 grad() const;
773
774 /**
775 \brief
776 Calculate the integral over the function space domain.
777 *
778 */
779 ESCRIPT_DLL_API
780 boost::python::object
781 integrate_const() const;
782
783 ESCRIPT_DLL_API
784 boost::python::object
785 integrate();
786
787 ESCRIPT_DLL_API
788 boost::python::object
789 integrateToTuple_const() const;
790
791 ESCRIPT_DLL_API
792 boost::python::object
793 integrateToTuple();
794
795
796
797 /**
798 \brief
799 Return the reciprocal (1./value) of each data point of this Data object.
800 *
801 */
802 ESCRIPT_DLL_API
803 Data
804 oneOver() const;
805 /**
806 \brief
807 Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
808 *
809 */
810 ESCRIPT_DLL_API
811 Data
812 wherePositive() const;
813
814 /**
815 \brief
816 Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
817 *
818 */
819 ESCRIPT_DLL_API
820 Data
821 whereNegative() const;
822
823 /**
824 \brief
825 Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
826 *
827 */
828 ESCRIPT_DLL_API
829 Data
830 whereNonNegative() const;
831
832 /**
833 \brief
834 Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
835 *
836 */
837 ESCRIPT_DLL_API
838 Data
839 whereNonPositive() const;
840
841 /**
842 \brief
843 Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
844 *
845 */
846 ESCRIPT_DLL_API
847 Data
848 whereZero(double tol=0.0) const;
849
850 /**
851 \brief
852 Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
853 *
854 */
855 ESCRIPT_DLL_API
856 Data
857 whereNonZero(double tol=0.0) const;
858
859 /**
860 \brief
861 Return the maximum absolute value of this Data object.
862
863 The method is not const because lazy data needs to be expanded before Lsup can be computed.
864 The _const form can be used when the Data object is const, however this will only work for
865 Data which is not Lazy.
866
867 For Data which contain no samples (or tagged Data for which no tags in use have a value)
868 zero is returned.
869 */
870 ESCRIPT_DLL_API
871 double
872 Lsup();
873
874 ESCRIPT_DLL_API
875 double
876 Lsup_const() const;
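// Illustrative sketch (not part of the original header) of the const/non-const
// forms discussed above, assuming "d" is a Data object that may be lazy:
//
//   double m1 = d.Lsup();          // resolves lazy data first if necessary
//
//   const Data& cd = d;
//   double m2 = cd.Lsup_const();   // only valid when cd is not lazy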
877
878
879 /**
880 \brief
881 Return the maximum value of this Data object.
882
883 The method is not const because lazy data needs to be expanded before sup can be computed.
884 The _const form can be used when the Data object is const, however this will only work for
885 Data which is not Lazy.
886
887 For Data which contain no samples (or tagged Data for which no tags in use have a value)
888 a large negative value is returned.
889 */
890 ESCRIPT_DLL_API
891 double
892 sup();
893
894 ESCRIPT_DLL_API
895 double
896 sup_const() const;
897
898
899 /**
900 \brief
901 Return the minimum value of this Data object.
902
903 The method is not const because lazy data needs to be expanded before inf can be computed.
904 The _const form can be used when the Data object is const, however this will only work for
905 Data which is not Lazy.
906
907 For Data which contain no samples (or tagged Data for which no tags in use have a value)
908 a large positive value is returned.
909 */
910 ESCRIPT_DLL_API
911 double
912 inf();
913
914 ESCRIPT_DLL_API
915 double
916 inf_const() const;
917
918
919
920 /**
921 \brief
922 Return the absolute value of each data point of this Data object.
923 *
924 */
925 ESCRIPT_DLL_API
926 Data
927 abs() const;
928
929 /**
930 \brief
931 Return the maximum value of each data point of this Data object.
932 *
933 */
934 ESCRIPT_DLL_API
935 Data
936 maxval() const;
937
938 /**
939 \brief
940 Return the minimum value of each data point of this Data object.
941 *
942 */
943 ESCRIPT_DLL_API
944 Data
945 minval() const;
946
947 /**
948 \brief
949 Return the (sample number, data-point number) of the data point with
950 the minimum value in this Data object.
951 */
952 ESCRIPT_DLL_API
953 const boost::python::tuple
954 minGlobalDataPoint() const;
955
956 ESCRIPT_DLL_API
957 void
958 calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
959 /**
960 \brief
961 Return the sign of each data point of this Data object.
962 -1 for negative values, zero for zero values, 1 for positive values.
963 *
964 */
965 ESCRIPT_DLL_API
966 Data
967 sign() const;
968
969 /**
970 \brief
971 Return the symmetric part of a matrix, i.e. (M + transpose(M))/2, at each data point.
972 *
973 */
974 ESCRIPT_DLL_API
975 Data
976 symmetric() const;
977
978 /**
979 \brief
980 Return the nonsymmetric part of a matrix, i.e. (M - transpose(M))/2, at each data point.
981 *
982 */
983 ESCRIPT_DLL_API
984 Data
985 nonsymmetric() const;
986
987 /**
988 \brief
989 Return the trace of a matrix
990 *
991 */
992 ESCRIPT_DLL_API
993 Data
994 trace(int axis_offset) const;
995
996 /**
997 \brief
998 Transpose each data point of this Data object around the given axis.
999 *
1000 */
1001 ESCRIPT_DLL_API
1002 Data
1003 transpose(int axis_offset) const;
1004
1005 /**
1006 \brief
1007 Return the eigenvalues of the symmetric part at each data point of this Data object in increasing order.
1008 Currently this function is restricted to rank 2, square shape, and dimension 3.
1009 *
1010 */
1011 ESCRIPT_DLL_API
1012 Data
1013 eigenvalues() const;
1014
1015 /**
1016 \brief
1017 Return the eigenvalues and corresponding eigenvectors of the symmetric part at each data point of this Data object.
1018 The eigenvalues are ordered by increasing size, where eigenvalues with a relative difference less than
1019 tol are treated as equal. The eigenvectors are orthogonal, normalized and scaled such that the
1020 first non-zero entry is positive.
1021 Currently this function is restricted to rank 2, square shape, and dimension 3
1022 *
1023 */
1024 ESCRIPT_DLL_API
1025 const boost::python::tuple
1026 eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1027
1028 /**
1029 \brief
1030 swaps the components axis0 and axis1
1031 *
1032 */
1033 ESCRIPT_DLL_API
1034 Data
1035 swapaxes(const int axis0, const int axis1) const;
1036
1037 /**
1038 \brief
1039 Return the error function erf of each data point of this Data object.
1040 *
1041 */
1042 ESCRIPT_DLL_API
1043 Data
1044 erf() const;
1045
1046 /**
1047 \brief
1048 Return the sin of each data point of this Data object.
1049 *
1050 */
1051 ESCRIPT_DLL_API
1052 Data
1053 sin() const;
1054
1055 /**
1056 \brief
1057 Return the cos of each data point of this Data object.
1058 *
1059 */
1060 ESCRIPT_DLL_API
1061 Data
1062 cos() const;
1063
1064 /**
1065 \brief
1066 Return the tan of each data point of this Data object.
1067 *
1068 */
1069 ESCRIPT_DLL_API
1070 Data
1071 tan() const;
1072
1073 /**
1074 \brief
1075 Return the asin of each data point of this Data object.
1076 *
1077 */
1078 ESCRIPT_DLL_API
1079 Data
1080 asin() const;
1081
1082 /**
1083 \brief
1084 Return the acos of each data point of this Data object.
1085 *
1086 */
1087 ESCRIPT_DLL_API
1088 Data
1089 acos() const;
1090
1091 /**
1092 \brief
1093 Return the atan of each data point of this Data object.
1094 *
1095 */
1096 ESCRIPT_DLL_API
1097 Data
1098 atan() const;
1099
1100 /**
1101 \brief
1102 Return the sinh of each data point of this Data object.
1103 *
1104 */
1105 ESCRIPT_DLL_API
1106 Data
1107 sinh() const;
1108
1109 /**
1110 \brief
1111 Return the cosh of each data point of this Data object.
1112 *
1113 */
1114 ESCRIPT_DLL_API
1115 Data
1116 cosh() const;
1117
1118 /**
1119 \brief
1120 Return the tanh of each data point of this Data object.
1121 *
1122 */
1123 ESCRIPT_DLL_API
1124 Data
1125 tanh() const;
1126
1127 /**
1128 \brief
1129 Return the asinh of each data point of this Data object.
1130 *
1131 */
1132 ESCRIPT_DLL_API
1133 Data
1134 asinh() const;
1135
1136 /**
1137 \brief
1138 Return the acosh of each data point of this Data object.
1139 *
1140 */
1141 ESCRIPT_DLL_API
1142 Data
1143 acosh() const;
1144
1145 /**
1146 \brief
1147 Return the atanh of each data point of this Data object.
1148 *
1149 */
1150 ESCRIPT_DLL_API
1151 Data
1152 atanh() const;
1153
1154 /**
1155 \brief
1156 Return the log to base 10 of each data point of this Data object.
1157 *
1158 */
1159 ESCRIPT_DLL_API
1160 Data
1161 log10() const;
1162
1163 /**
1164 \brief
1165 Return the natural log of each data point of this Data object.
1166 *
1167 */
1168 ESCRIPT_DLL_API
1169 Data
1170 log() const;
1171
1172 /**
1173 \brief
1174 Return the exponential function of each data point of this Data object.
1175 *
1176 */
1177 ESCRIPT_DLL_API
1178 Data
1179 exp() const;
1180
1181 /**
1182 \brief
1183 Return the square root of each data point of this Data object.
1184 *
1185 */
1186 ESCRIPT_DLL_API
1187 Data
1188 sqrt() const;
1189
1190 /**
1191 \brief
1192 Return the negation of each data point of this Data object.
1193 *
1194 */
1195 ESCRIPT_DLL_API
1196 Data
1197 neg() const;
1198
1199 /**
1200 \brief
1201 Return the identity of each data point of this Data object.
1202 Simply returns this object unmodified.
1203 *
1204 */
1205 ESCRIPT_DLL_API
1206 Data
1207 pos() const;
1208
1209 /**
1210 \brief
1211 Return the given power of each data point of this Data object.
1212
1213 \param right Input - the power to raise the object to.
1214 *
1215 */
1216 ESCRIPT_DLL_API
1217 Data
1218 powD(const Data& right) const;
1219
1220 /**
1221 \brief
1222 Return each data point of this Data object raised to the given power, supplied as a boost python object.
1223
1224 \param right Input - the power to raise the object to.
1225 *
1226 */
1227 ESCRIPT_DLL_API
1228 Data
1229 powO(const boost::python::object& right) const;
1230
1231 /**
1232 \brief
1233 Return the given bases, supplied as a boost python object, raised to the power of each data point of this Data object.
1234
1235 \param left Input - the bases
1236 *
1237 */
1238
1239 ESCRIPT_DLL_API
1240 Data
1241 rpowO(const boost::python::object& left) const;
1242
1243 /**
1244 \brief
1245 writes the object to a file in the DX file format
1246 */
1247 ESCRIPT_DLL_API
1248 void
1249 saveDX(std::string fileName) const;
1250
1251 /**
1252 \brief
1253 writes the object to a file in the VTK file format
1254 */
1255 ESCRIPT_DLL_API
1256 void
1257 saveVTK(std::string fileName) const;
1258
1259 /**
1260 \brief
1261 Overloaded operator +=
1262 \param right - Input - The right hand side.
1263 *
1264 */
1265 ESCRIPT_DLL_API
1266 Data& operator+=(const Data& right);
1267 ESCRIPT_DLL_API
1268 Data& operator+=(const boost::python::object& right);
1269
1270 ESCRIPT_DLL_API
1271 Data& operator=(const Data& other);
1272
1273 /**
1274 \brief
1275 Overloaded operator -=
1276 \param right - Input - The right hand side.
1277 *
1278 */
1279 ESCRIPT_DLL_API
1280 Data& operator-=(const Data& right);
1281 ESCRIPT_DLL_API
1282 Data& operator-=(const boost::python::object& right);
1283
1284 /**
1285 \brief
1286 Overloaded operator *=
1287 \param right - Input - The right hand side.
1288 *
1289 */
1290 ESCRIPT_DLL_API
1291 Data& operator*=(const Data& right);
1292 ESCRIPT_DLL_API
1293 Data& operator*=(const boost::python::object& right);
1294
1295 /**
1296 \brief
1297 Overloaded operator /=
1298 \param right - Input - The right hand side.
1299 *
1300 */
1301 ESCRIPT_DLL_API
1302 Data& operator/=(const Data& right);
1303 ESCRIPT_DLL_API
1304 Data& operator/=(const boost::python::object& right);
1305
1306 /**
1307 \brief
1308 Returns true if this can be interpolated to functionspace.
1309 */
1310 ESCRIPT_DLL_API
1311 bool
1312 probeInterpolation(const FunctionSpace& functionspace) const;
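// Illustrative sketch (not part of the original header): guarding an
// interpolation with probeInterpolation(). "d" and the target FunctionSpace
// "fs" are assumptions for the example.
//
//   if (d.probeInterpolation(fs))
//   {
//       Data e = d.interpolate(fs);   // d expressed on the new function space
//   }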
1313
1314 /**
1315 Data object slicing methods.
1316 */
1317
1318 /**
1319 \brief
1320 Returns a slice from this Data object.
1321
1322 Description:
1323 Implements the [] get operator in python.
1324 Calls getSlice.
1325
1326 \param key - Input - python slice tuple specifying
1327 slice to return.
1328 */
1329 ESCRIPT_DLL_API
1330 Data
1331 getItem(const boost::python::object& key) const;
1332
1333 /**
1334 \brief
1335 Copies slice from value into this Data object.
1336
1337 Implements the [] set operator in python.
1338 Calls setSlice.
1339
1340 \param key - Input - python slice tuple specifying
1341 slice to copy from value.
1342 \param value - Input - Data object to copy from.
1343 */
1344 ESCRIPT_DLL_API
1345 void
1346 setItemD(const boost::python::object& key,
1347 const Data& value);
1348
1349 ESCRIPT_DLL_API
1350 void
1351 setItemO(const boost::python::object& key,
1352 const boost::python::object& value);
1353
1354 // The following public methods should be treated as private.
1355
1356 /**
1357 \brief
1358 Perform the given unary operation on every element of every data point in
1359 this Data object.
1360 */
1361 template <class UnaryFunction>
1362 ESCRIPT_DLL_API
1363 inline
1364 void
1365 unaryOp2(UnaryFunction operation);
1366
1367 /**
1368 \brief
1369 Return a Data object containing the specified slice of
1370 this Data object.
1371 \param region - Input - Region to copy.
1372 *
1373 */
1374 ESCRIPT_DLL_API
1375 Data
1376 getSlice(const DataTypes::RegionType& region) const;
1377
1378 /**
1379 \brief
1380 Copy the specified slice from the given value into this
1381 Data object.
1382 \param value - Input - Data to copy from.
1383 \param region - Input - Region to copy.
1384 *
1385 */
1386 ESCRIPT_DLL_API
1387 void
1388 setSlice(const Data& value,
1389 const DataTypes::RegionType& region);
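// Illustrative sketch (not part of the original header): slicing from C++.
// This assumes DataTypes::RegionType is a vector of (lower, upper) index
// pairs, one pair per rank of the data points; "matrixData" is an assumed
// rank 2 Data object.
//
//   DataTypes::RegionType region;
//   region.push_back(std::make_pair(0, 2));    // rows 0..1
//   region.push_back(std::make_pair(1, 3));    // columns 1..2
//   Data part = matrixData.getSlice(region);   // copy the sub-block out
//   matrixData.setSlice(part, region);         // or copy a block back in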
1390
1391 /**
1392 \brief
1393 print the data values to stdout. Used for debugging
1394 */
1395 ESCRIPT_DLL_API
1396 void
1397 print(void);
1398
1399 /**
1400 \brief
1401 return the MPI rank number of the local data
1402 MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
1403 is returned
1404 */
1405 ESCRIPT_DLL_API
1406 int
1407 get_MPIRank(void) const;
1408
1409 /**
1410 \brief
1411 return the number of MPI processes for the local data
1412 MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
1413 is returned
1414 */
1415 ESCRIPT_DLL_API
1416 int
1417 get_MPISize(void) const;
1418
1419 /**
1420 \brief
1421 return the MPI communicator of the local data
1422 MPI_COMM_WORLD is assumed and returned.
1423 */
1424 ESCRIPT_DLL_API
1425 MPI_Comm
1426 get_MPIComm(void) const;
1427
1428 /**
1429 \brief
1430 return the object produced by the factory, which is a DataConstant or DataExpanded
1431 TODO Ownership of this object should be explained in doco.
1432 */
1433 ESCRIPT_DLL_API
1434 DataAbstract*
1435 borrowData(void) const;
1436
1437 ESCRIPT_DLL_API
1438 DataAbstract_ptr
1439 borrowDataPtr(void) const;
1440
1441 ESCRIPT_DLL_API
1442 DataReady_ptr
1443 borrowReadyPtr(void) const;
1444
1445
1446
1447 /**
1448 \brief
1449 Return a pointer to the beginning of the datapoint at the specified offset.
1450 TODO Eventually these should be inlined.
1451 \param i - position(offset) in the underlying datastructure
1452 */
1453
1454 ESCRIPT_DLL_API
1455 DataTypes::ValueType::const_reference
1456 getDataAtOffsetRO(DataTypes::ValueType::size_type i);
1457
1458
1459 ESCRIPT_DLL_API
1460 DataTypes::ValueType::reference
1461 getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1462
1463
1464
1465 /**
1466 \brief Create a buffer for use by getSampleDataRO
1467 Allocates a DataVector large enough for DataLazy::resolveSample to operate on for the current Data.
1468 Do not use this buffer for other Data instances (unless you are sure they will be the same size).
1469
1470 In multi-threaded sections, this needs to be called on each thread.
1471
1472 \return A DataVector* if Data is lazy, NULL otherwise.
1473 \warning This pointer must be deallocated using freeSampleBuffer to avoid cross library memory issues.
1474 */
1475 ESCRIPT_DLL_API
1476 DataTypes::ValueType*
1477 allocSampleBuffer() const;
1478
1479 /**
1480 \brief Free a buffer allocated with allocSampleBuffer.
1481 \param buffer Input - pointer to the buffer to deallocate.
1482 */
1483 ESCRIPT_DLL_API void freeSampleBuffer(DataTypes::ValueType* buffer);
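// Illustrative sketch (not part of the original header) of the read-only
// access pattern for possibly-lazy Data, assuming "d" is a Data object:
//
//   #pragma omp parallel
//   {
//       DataTypes::ValueType* buf = d.allocSampleBuffer();  // NULL unless d is lazy
//       #pragma omp for
//       for (int s = 0; s < d.getNumSamples(); ++s)
//       {
//           const double* p = d.getSampleDataRO(s, buf);
//           // ... read the sample values through p ...
//       }
//       d.freeSampleBuffer(buf);                            // must pair with allocSampleBuffer
//   }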
1484
1485 protected:
1486
1487 private:
1488
1489 double
1490 LsupWorker() const;
1491
1492 double
1493 supWorker() const;
1494
1495 double
1496 infWorker() const;
1497
1498 boost::python::object
1499 integrateWorker() const;
1500
1501 /**
1502 \brief
1503 Check *this and the right operand are compatible. Throws
1504 an exception if they aren't.
1505 \param right - Input - The right hand side.
1506 */
1507 inline
1508 void
1509 operandCheck(const Data& right) const
1510 {
1511 return m_data->operandCheck(*(right.m_data.get()));
1512 }
1513
1514 /**
1515 \brief
1516 Perform the specified reduction algorithm on every element of every data point in
1517 this Data object according to the given function and return the single value result.
1518 */
1519 template <class BinaryFunction>
1520 inline
1521 double
1522 algorithm(BinaryFunction operation,
1523 double initial_value) const;
1524
1525 /**
1526 \brief
1527 Reduce each data-point in this Data object using the given operation. Return a Data
1528 object with the same number of data-points, but with each data-point containing only
1529 one value - the result of the reduction operation on the corresponding data-point in
1530 this Data object
1531 */
1532 template <class BinaryFunction>
1533 inline
1534 Data
1535 dp_algorithm(BinaryFunction operation,
1536 double initial_value) const;
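// Illustrative sketch (not part of the original header): inside the class, a
// data-point maximum reduction could be expressed through dp_algorithm.
// FMax is assumed to be the maximum functor from DataAlgorithm.h; the real
// maxval() implementation may differ.
//
//   Data pointwiseMax = dp_algorithm(FMax(), -std::numeric_limits<double>::max());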
1537
1538 /**
1539 \brief
1540 Perform the given binary operation on all of the data's elements.
1541 The underlying type of the right hand side (right) determines the final
1542 type of *this after the operation. For example if the right hand side
1543 is expanded *this will be expanded if necessary.
1544 RHS is a Data object.
1545 */
1546 template <class BinaryFunction>
1547 inline
1548 void
1549 binaryOp(const Data& right,
1550 BinaryFunction operation);
1551
1552 /**
1553 \brief
1554 Convert the data type of the RHS to match this.
1555 \param right - Input - data type to match.
1556 */
1557 void
1558 typeMatchLeft(Data& right) const;
1559
1560 /**
1561 \brief
1562 Convert the data type of this to match the RHS.
1563 \param right - Input - data type to match.
1564 */
1565 void
1566 typeMatchRight(const Data& right);
1567
1568 /**
1569 \brief
1570 Construct a Data object of the appropriate type.
1571 */
1572
1573 void
1574 initialise(const DataTypes::ValueType& value,
1575 const DataTypes::ShapeType& shape,
1576 const FunctionSpace& what,
1577 bool expanded);
1578
1579 void
1580 initialise(const WrappedArray& value,
1581 const FunctionSpace& what,
1582 bool expanded);
1583
1584 //
1585 // flag to protect the data object against any update
1586 bool m_protected;
1587 mutable bool m_shared;
1588 bool m_lazy;
1589
1590 //
1591 // pointer to the actual data object
1592 // boost::shared_ptr<DataAbstract> m_data;
1593 DataAbstract_ptr m_data;
1594
1595 // If possible please use getReadyPtr instead.
1596 // But see warning below.
1597 const DataReady*
1598 getReady() const;
1599
1600 DataReady*
1601 getReady();
1602
1603
1604 // Be wary of using this for local operations since it (temporarily) increases reference count.
1605 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1606 // getReady() instead
1607 DataReady_ptr
1608 getReadyPtr();
1609
1610 const_DataReady_ptr
1611 getReadyPtr() const;
1612
1613
1614 /**
1615 \brief Update the Data's shared flag
1616 This indicates that the DataAbstract used by this object is now shared (or no longer shared).
1617 For internal use only.
1618 */
1619 void updateShareStatus(bool nowshared) const
1620 {
1621 m_shared=nowshared; // m_shared is mutable
1622 }
1623
1624 // In the isShared() method below:
1625 // A problem would occur if m_data (the address pointed to) were being modified
1626 // while the call m_data->is_shared is being executed.
1627 //
1628 // Q: So why do I think this code can be thread safe/correct?
1629 // A: We need to make some assumptions.
1630 // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1631 // 2. We assume that no constructions or assignments which will share previously unshared data
1632 // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1633 //
1634 // This means that the only transition we need to consider, is when a previously shared object is
1635 // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1636 // In those cases the m_shared flag changes to false after m_data has completed changing.
1637 // For any threads executing before the flag switches they will assume the object is still shared.
1638 bool isShared() const
1639 {
1640 if (m_shared) return true;
1641 if (m_data->isShared())
1642 {
1643 updateShareStatus(true);
1644 return true;
1645 }
1646 return false;
1647 }
1648
1649 /**
1650 \brief If another object is sharing our member data, make a copy to work with instead.
1651 This code should only be called from single threaded sections of code.
1652 */
1653 void exclusiveWrite()
1654 {
1655 // if (!m_data.unique())
1656 // {
1657 // DataAbstract* t=m_data->deepCopy();
1658 // // m_data=DataAbstract_ptr(t);
1659 // set_m_data(DataAbstract_ptr(t));
1660 // }
1661
1662 #ifdef _OPENMP
1663 if (omp_in_parallel())
1664 {
1665 *((int*)0)=17;
1666 throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1667 }
1668 #endif
1669
1670 if (isShared())
1671 {
1672 DataAbstract* t=m_data->deepCopy();
1673 set_m_data(DataAbstract_ptr(t));
1674 }
1675 }
1676
1677
1678
1679
1680
1681 /**
1682 \brief Modify the data abstract hosted by this Data object
1683 For internal use only.
1684 Passing a pointer to null is permitted (do this in the destructor)
1685 \warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
1686 */
1687 void set_m_data(DataAbstract_ptr p);
1688
1689 friend class DataAbstract; // To allow calls to updateShareStatus
1690
1691 };
1692
1693 } // end namespace escript
1694
1695
1696 // No, this is not supposed to be at the top of the file
1697 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1698 // so that I can dynamic cast between them below.
1699 #include "DataReady.h"
1700 #include "DataLazy.h"
1701
1702 namespace escript
1703 {
1704
1705 inline
1706 const DataReady*
1707 Data::getReady() const
1708 {
1709 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1710 EsysAssert((dr!=0), "Error - casting to DataReady.");
1711 return dr;
1712 }
1713
1714 inline
1715 DataReady*
1716 Data::getReady()
1717 {
1718 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1719 EsysAssert((dr!=0), "Error - casting to DataReady.");
1720 return dr;
1721 }
1722
1723 // Be wary of using this for local operations since it (temporarily) increases reference count.
1724 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1725 // getReady() instead
1726 inline
1727 DataReady_ptr
1728 Data::getReadyPtr()
1729 {
1730 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1731 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1732 return dr;
1733 }
1734
1735
1736 inline
1737 const_DataReady_ptr
1738 Data::getReadyPtr() const
1739 {
1740 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1741 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1742 return dr;
1743 }
1744
1745 inline
1746 DataAbstract::ValueType::value_type*
1747 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1748 {
1749 // if (isLazy())
1750 // {
1751 // resolve();
1752 // }
1753 // exclusiveWrite();
1754 if (isLazy())
1755 {
1756 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1757 }
1758 return getReady()->getSampleDataRW(sampleNo);
1759 }
1760
1761 inline
1762 const DataAbstract::ValueType::value_type*
1763 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, DataTypes::ValueType* buffer)
1764 {
1765 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1766 if (l!=0)
1767 {
1768 size_t offset=0;
1769 if (buffer==NULL)
1770 {
1771 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
1772 }
1773 const DataTypes::ValueType* res=l->resolveSample(*buffer,0,sampleNo,offset);
1774 return &((*res)[offset]);
1775 }
1776 return getReady()->getSampleDataRO(sampleNo);
1777 }
1778
1779
1780
1781 /**
1782 Modify a filename for MPI parallel output to multiple files
1783 */
1784 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1785
1786 /**
1787 Binary Data object operators.
1788 */
1789 inline double rpow(double x,double y)
1790 {
1791 return pow(y,x);
1792 }
1793
1794 /**
1795 \brief
1796 Operator+
1797 Takes two Data objects.
1798 */
1799 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1800
1801 /**
1802 \brief
1803 Operator-
1804 Takes two Data objects.
1805 */
1806 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1807
1808 /**
1809 \brief
1810 Operator*
1811 Takes two Data objects.
1812 */
1813 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1814
1815 /**
1816 \brief
1817 Operator/
1818 Takes two Data objects.
1819 */
1820 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
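// Illustrative sketch (not part of the original header): the binary operators
// above combine two Data objects point by point, interpolating between
// function spaces where possible. "a" and "b" are assumed Data objects.
//
//   Data sum    = a + b;   // pointwise addition
//   Data scaled = a * b;   // pointwise multiplication
//   Data ratio  = a / b;   // pointwise division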
1821
1822 /**
1823 \brief
1824 Operator+
1825 Takes LHS Data object and RHS python::object.
1826 python::object must be convertible to Data type.
1827 */
1828 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
1829
1830 /**
1831 \brief
1832 Operator-
1833 Takes LHS Data object and RHS python::object.
1834 python::object must be convertible to Data type.
1835 */
1836 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
1837
1838 /**
1839 \brief
1840 Operator*
1841 Takes LHS Data object and RHS python::object.
1842 python::object must be convertible to Data type.
1843 */
1844 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
1845
1846 /**
1847 \brief
1848 Operator/
1849 Takes LHS Data object and RHS python::object.
1850 python::object must be convertible to Data type.
1851 */
1852 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
1853
1854 /**
1855 \brief
1856 Operator+
1857 Takes LHS python::object and RHS Data object.
1858 python::object must be convertible to Data type.
1859 */
1860 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
1861
1862 /**
1863 \brief
1864 Operator-
1865 Takes LHS python::object and RHS Data object.
1866 python::object must be convertible to Data type.
1867 */
1868 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
1869
1870 /**
1871 \brief
1872 Operator*
1873 Takes LHS python::object and RHS Data object.
1874 python::object must be convertible to Data type.
1875 */
1876 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
1877
1878 /**
1879 \brief
1880 Operator/
1881 Takes LHS python::object and RHS Data object.
1882 python::object must be convertible to Data type.
1883 */
1884 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
1885
1886
1887
1888 /**
1889 \brief
1890 Output operator
1891 */
1892 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
1893
1894 /**
1895 \brief
1896 Compute a tensor product of two Data objects
1897 \param arg0 - Input - Data object
1898 \param arg1 - Input - Data object
1899 \param axis_offset - Input - axis offset
1900 \param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
1901 */
1902 ESCRIPT_DLL_API
1903 Data
1904 C_GeneralTensorProduct(Data& arg0,
1905 Data& arg1,
1906 int axis_offset=0,
1907 int transpose=0);
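// Illustrative sketch (not part of the original header): a matrix-vector style
// product via C_GeneralTensorProduct. "A" (rank 2) and "v" (rank 1) are assumed
// Data objects on compatible function spaces; the axis_offset of 1 contracts
// over one axis and transpose=0 transposes neither argument.
//
//   Data Av = C_GeneralTensorProduct(A, v, 1, 0);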
1908
1909 /**
1910 \brief
1911 Perform the given binary operation with this and right as operands.
1912 Right is a Data object.
1913 */
1914 template <class BinaryFunction>
1915 inline
1916 void
1917 Data::binaryOp(const Data& right,
1918 BinaryFunction operation)
1919 {
1920 //
1921 // a rank zero object cannot be updated by an RHS with rank greater than zero
1922 if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
1923 throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
1924 }
1925
1926 if (isLazy() || right.isLazy())
1927 {
1928 throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
1929 }
1930 //
1931 // initially make the temporary a shallow copy
1932 Data tempRight(right);
1933
1934 if (getFunctionSpace()!=right.getFunctionSpace()) {
1935 if (right.probeInterpolation(getFunctionSpace())) {
1936 //
1937 // an interpolation is required so create a new Data
1938 tempRight=Data(right,this->getFunctionSpace());
1939 } else if (probeInterpolation(right.getFunctionSpace())) {
1940 //
1941 // interpolate onto the RHS function space
1942 Data tempLeft(*this,right.getFunctionSpace());
1943 // m_data=tempLeft.m_data;
1944 set_m_data(tempLeft.m_data);
1945 }
1946 }
1947 operandCheck(tempRight);
1948 //
1949 // ensure this has the right type for the RHS
1950 typeMatchRight(tempRight);
1951 //
1952 // Need to cast to the concrete types so that the correct binaryOp
1953 // is called.
1954 if (isExpanded()) {
1955 //
1956 // Expanded data will be done in parallel, the right hand side can be
1957 // of any data type
1958 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
1959 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
1960 escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
1961 } else if (isTagged()) {
1962 //
1963 // Tagged data is operated on serially, the right hand side can be
1964 // either DataConstant or DataTagged
1965 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
1966 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
1967 if (right.isTagged()) {
1968 DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
1969 EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
1970 escript::binaryOp(*leftC,*rightC,operation);
1971 } else {
1972 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
1973 EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
1974 escript::binaryOp(*leftC,*rightC,operation);
1975 }
1976 } else if (isConstant()) {
1977 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
1978 DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
1979 EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
1980 escript::binaryOp(*leftC,*rightC,operation);
1981 }
1982 }
1983
1984 /**
1985 \brief
1986 Perform the given Data object reduction algorithm on this and return the result.
1987 Given operation combines each element of each data point, thus argument
1988 object (*this) is a rank n Data object, and returned object is a scalar.
1989 Calls escript::algorithm.
1990 */
1991 template <class BinaryFunction>
1992 inline
1993 double
1994 Data::algorithm(BinaryFunction operation, double initial_value) const
1995 {
1996 if (isExpanded()) {
1997 DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
1998 EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
1999 return escript::algorithm(*leftC,operation,initial_value);
2000 } else if (isTagged()) {
2001 DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2002 EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2003 return escript::algorithm(*leftC,operation,initial_value);
2004 } else if (isConstant()) {
2005 DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2006 EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2007 return escript::algorithm(*leftC,operation,initial_value);
2008 } else if (isEmpty()) {
2009 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2010 } else if (isLazy()) {
2011 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2012 } else {
2013 throw DataException("Error - Data encapsulates an unknown type.");
2014 }
2015 }
2016
2017 /**
2018 \brief
2019 Perform the given data point reduction algorithm on data and return the result.
2020 Given operation combines each element within each data point into a scalar,
2021 thus argument object is a rank n Data object, and returned object is a
2022 rank 0 Data object.
2023 Calls escript::dp_algorithm.
2024 */
2025 template <class BinaryFunction>
2026 inline
2027 Data
2028 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2029 {
2030 if (isEmpty()) {
2031 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2032 }
2033 else if (isExpanded()) {
2034 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2035 DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2036 DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2037 EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2038 EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2039 escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2040 return result;
2041 }
2042 else if (isTagged()) {
2043 DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2044 EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2045 DataTypes::ValueType defval(1);
2046 defval[0]=0;
2047 DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2048 escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2049 return Data(resultT); // note: the Data object now owns the resultT pointer
2050 }
2051 else if (isConstant()) {
2052 Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2053 DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2054 DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2055 EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2056 EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2057 escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2058 return result;
2059 } else if (isLazy()) {
2060 throw DataException("Error - Operations not permitted on instances of DataLazy.");
2061 } else {
2062 throw DataException("Error - Data encapsulates an unknown type.");
2063 }
2064 }
2065
2066 /**
2067 \brief
2068 Compute a tensor operation with two Data objects
2069 \param arg0 - Input - Data object
2070 \param arg1 - Input - Data object
2071 \param operation - Input - Binary op functor
2072 */
2073 template <typename BinaryFunction>
2074 inline
2075 Data
2076 C_TensorBinaryOperation(Data const &arg_0,
2077 Data const &arg_1,
2078 BinaryFunction operation)
2079 {
2080 if (arg_0.isEmpty() || arg_1.isEmpty())
2081 {
2082 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2083 }
2084 if (arg_0.isLazy() || arg_1.isLazy())
2085 {
2086 throw DataException("Error - Operations not permitted on lazy data.");
2087 }
2088 // Interpolate if necessary and find an appropriate function space
2089 Data arg_0_Z, arg_1_Z;
2090 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
2091 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
2092 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2093 arg_1_Z = Data(arg_1);
2094 }
2095 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
2096 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2097 arg_0_Z =Data(arg_0);
2098 }
2099 else {
2100 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
2101 }
2102 } else {
2103 arg_0_Z = Data(arg_0);
2104 arg_1_Z = Data(arg_1);
2105 }
2106 // Get rank and shape of inputs
2107 int rank0 = arg_0_Z.getDataPointRank();
2108 int rank1 = arg_1_Z.getDataPointRank();
2109 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2110 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2111 int size0 = arg_0_Z.getDataPointSize();
2112 int size1 = arg_1_Z.getDataPointSize();
2113 // Declare output Data object
2114 Data res;
2115
2116 if (shape0 == shape1) {
2117 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2118 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2119 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2120 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2121 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2122
2123 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2124 }
2125 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2126
2127 // Prepare the DataConstant input
2128 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2129
2130 // Borrow DataTagged input from Data object
2131 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2132
2133 // Prepare a DataTagged output 2
2134 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2135 res.tag();
2136 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2137
2138 // Prepare offset into DataConstant
2139 int offset_0 = tmp_0->getPointOffset(0,0);
2140 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2141
2142 // Get the pointers to the actual data
2143 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2144 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2145
2146 // Compute a result for the default
2147 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2148 // Compute a result for each tag
2149 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2150 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2151 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2152 tmp_2->addTag(i->first);
2153 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2154 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2155
2156 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2157 }
2158
2159 }
2160 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2161 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2162 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2163 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2164 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2165
2166 int sampleNo_1,dataPointNo_1;
2167 int numSamples_1 = arg_1_Z.getNumSamples();
2168 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2169 int offset_0 = tmp_0->getPointOffset(0,0);
2170 res.requireWrite();
2171 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2172 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2173 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2174 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2175 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2176 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2177 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2178 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2179 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2180 }
2181 }
2182
2183 }
2184 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2185 // Borrow DataTagged input from Data object
2186 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2187
2188 // Prepare the DataConstant input
2189 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2190
2191 // Prepare a DataTagged output (tmp_2)
2192 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2193 res.tag();
2194 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2195
2196 // Prepare offset into DataConstant
2197 int offset_1 = tmp_1->getPointOffset(0,0);
2198
2199 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2200 // Get the pointers to the actual data
2201 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2202 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2203 // Compute a result for the default
2204 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2205 // Compute a result for each tag
2206 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2207 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2208 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2209 tmp_2->addTag(i->first);
2210 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2211 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2212 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2213 }
2214
2215 }
2216 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2217 // Borrow DataTagged input from Data object
2218 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2219
2220 // Borrow DataTagged input from Data object
2221 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2222
2223 // Prepare a DataTagged output (tmp_2)
2224 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2225 res.tag(); // DataTagged output
2226 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2227
2228 // Get the pointers to the actual data
2229 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2230 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2231 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2232
2233 // Compute a result for the default
2234 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2235 // Merge the tags
2236 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2237 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2238 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2239 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2240 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2241 }
2242 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2243 tmp_2->addTag(i->first);
2244 }
2245 // Compute a result for each tag
2246 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2247 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2248
2249 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2250 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2251 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2252
2253 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2254 }
2255
2256 }
2257 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2258 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2259 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2260 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2261 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2262 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2263
2264 int sampleNo_0,dataPointNo_0;
2265 int numSamples_0 = arg_0_Z.getNumSamples();
2266 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2267 res.requireWrite();
2268 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2269 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2270 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2271 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2272 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2273 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2274 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2275 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2276 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2277 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2278 }
2279 }
2280
2281 }
2282 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2283 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2284 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2285 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2286 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2287
2288 int sampleNo_0,dataPointNo_0;
2289 int numSamples_0 = arg_0_Z.getNumSamples();
2290 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2291 int offset_1 = tmp_1->getPointOffset(0,0);
2292 res.requireWrite();
2293 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2294 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2295 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2296 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2297 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2298
2299 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2300 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2301 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2302
2303
2304 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2305 }
2306 }
2307
2308 }
2309 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2310 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2311 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2312 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2313 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2314 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2315
2316 int sampleNo_0,dataPointNo_0;
2317 int numSamples_0 = arg_0_Z.getNumSamples();
2318 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2319 res.requireWrite();
2320 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2321 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2322 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2323 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2324 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2325 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2326 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2327 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2328 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2329 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2330 }
2331 }
2332
2333 }
2334 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2335 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2336 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2337 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2338 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2339 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2340
2341 int sampleNo_0,dataPointNo_0;
2342 int numSamples_0 = arg_0_Z.getNumSamples();
2343 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2344 res.requireWrite();
2345 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2346 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2347 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2348 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2349 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2350 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2351 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2352 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2353 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2354 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2355 }
2356 }
2357
2358 }
2359 else {
2360 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2361 }
2362
2363 } else if (0 == rank0) {
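    // arg_0 is a scalar (rank 0): its single value ptr_0[0] is broadcast
    // against every data point of arg_1, so the result takes shape1/size1.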
2364 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2365 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2366 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2367 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2368 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2369 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2370 }
2371 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2372
2373 // Prepare the DataConstant input
2374 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2375
2376 // Borrow DataTagged input from Data object
2377 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2378
2379 // Prepare a DataTagged output (tmp_2)
2380 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2381 res.tag();
2382 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2383
2384 // Prepare offset into DataConstant
2385 int offset_0 = tmp_0->getPointOffset(0,0);
2386 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2387
2388 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2389 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2390
2391 // Compute a result for the default
2392 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2393 // Compute a result for each tag
2394 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2395 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2396 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2397 tmp_2->addTag(i->first);
2398 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2399 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2400 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2401 }
2402
2403 }
2404 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2405
2406 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2407 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2408 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2409 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2410
2411 int sampleNo_1,dataPointNo_1;
2412 int numSamples_1 = arg_1_Z.getNumSamples();
2413 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2414 int offset_0 = tmp_0->getPointOffset(0,0);
2415 res.requireWrite();
2416 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2417 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2418 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2419 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2420 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2421 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2422 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2423 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2424 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2425
2426 }
2427 }
2428
2429 }
2430 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2431
2432 // Borrow DataTagged input from Data object
2433 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2434
2435 // Prepare the DataConstant input
2436 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2437
2438 // Prepare a DataTagged output (tmp_2)
2439 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2440 res.tag();
2441 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2442
2443 // Prepare offset into DataConstant
2444 int offset_1 = tmp_1->getPointOffset(0,0);
2445 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2446
2447 // Get the pointers to the actual data
2448 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2449 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2450
2451
2452 // Compute a result for the default
2453 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2454 // Compute a result for each tag
2455 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2456 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2457 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2458 tmp_2->addTag(i->first);
2459 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2460 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2461
2462 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2463 }
2464
2465 }
2466 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2467
2468 // Borrow DataTagged input from Data object
2469 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2470
2471 // Borrow DataTagged input from Data object
2472 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2473
2474 // Prepare a DataTagged output (tmp_2)
2475 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2476 res.tag(); // DataTagged output
2477 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2478
2479 // Get the pointers to the actual data
2480 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2481 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2482 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2483
2484 // Compute a result for the default
2485 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2486 // Merge the tags
2487 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2488 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2489 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2490 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2491 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2492 }
2493 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2494 tmp_2->addTag(i->first);
2495 }
2496 // Compute a result for each tag
2497 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2498 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2499
2507 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2508 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2509 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2510
2511 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2512 }
2513
2514 }
2515 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2516
2517 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2518 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2519 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2520 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2521 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2522
2523 int sampleNo_0,dataPointNo_0;
2524 int numSamples_0 = arg_0_Z.getNumSamples();
2525 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2526 res.requireWrite();
2527 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2528 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2529 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2530 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2531 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2532 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2533 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2534 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2535 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2536 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2537 }
2538 }
2539
2540 }
2541 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2542 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2543 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2544 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2545 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2546
2547 int sampleNo_0,dataPointNo_0;
2548 int numSamples_0 = arg_0_Z.getNumSamples();
2549 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2550 int offset_1 = tmp_1->getPointOffset(0,0);
2551 res.requireWrite();
2552 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2553 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2554 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2555 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2556 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2557 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2558 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2559 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2560 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2561 }
2562 }
2563
2564
2565 }
2566 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2567
2568 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2569 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2570 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2571 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2572 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2573
2574 int sampleNo_0,dataPointNo_0;
2575 int numSamples_0 = arg_0_Z.getNumSamples();
2576 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2577 res.requireWrite();
2578 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2579 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2580 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2581 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2582 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2583 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2584 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2585 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2586 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2587 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2588 }
2589 }
2590
2591 }
2592 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2593
2594 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2595 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2596 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2597 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2598 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2599
2600 int sampleNo_0,dataPointNo_0;
2601 int numSamples_0 = arg_0_Z.getNumSamples();
2602 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2603 res.requireWrite();
2604 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2605 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2606 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2607 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2608 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2609 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2610 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2611 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2612 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2613 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2614 }
2615 }
2616
2617 }
2618 else {
2619 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2620 }
2621
2622 } else if (0 == rank1) {
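    // arg_1 is a scalar (rank 0): its single value ptr_1[0] is broadcast
    // against every data point of arg_0, so the result takes shape0/size0.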
2623 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2624 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2625 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2626 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2627 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2628 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2629 }
2630 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2631
2632 // Prepare the DataConstant input
2633 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2634
2635 // Borrow DataTagged input from Data object
2636 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2637
2638 // Prepare a DataTagged output (tmp_2)
2639 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2640 res.tag();
2641 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2642
2643 // Prepare offset into DataConstant
2644 int offset_0 = tmp_0->getPointOffset(0,0);
2645 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2646
2647 // Get the pointers to the actual data
2648 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2649 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2650
2651 // Compute a result for the default
2652 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2653 // Compute a result for each tag
2654 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2655 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2656 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2657 tmp_2->addTag(i->first);
2658 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2659 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2660 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2661 }
2662 }
2663 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2664
2665 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2666 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2667 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2668 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2669
2670 int sampleNo_1,dataPointNo_1;
2671 int numSamples_1 = arg_1_Z.getNumSamples();
2672 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2673 int offset_0 = tmp_0->getPointOffset(0,0);
2674 res.requireWrite();
2675 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2676 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2677 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2678 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2679 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2680 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2681 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2682 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2683 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2684 }
2685 }
2686
2687 }
2688 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2689
2690 // Borrow DataTagged input from Data object
2691 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2692
2693 // Prepare the DataConstant input
2694 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2695
2696 // Prepare a DataTagged output (tmp_2)
2697 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2698 res.tag();
2699 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2700
2701 // Prepare offset into DataConstant
2702 int offset_1 = tmp_1->getPointOffset(0,0);
2703 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2704 // Get the pointers to the actual data
2705 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2706 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2707 // Compute a result for the default
2708 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2709 // Compute a result for each tag
2710 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2711 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2712 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2713 tmp_2->addTag(i->first);
2718 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2719 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2720 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2721 }
2722
2723 }
2724 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2725
2726 // Borrow DataTagged input from Data object
2727 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2728
2729 // Borrow DataTagged input from Data object
2730 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2731
2732 // Prepare a DataTagged output (tmp_2)
2733 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2734 res.tag(); // DataTagged output
2735 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2736
2737 // Get the pointers to the actual data
2738 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2739 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2740 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2741
2742 // Compute a result for the default
2743 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2744 // Merge the tags
2745 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2746 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2747 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2748 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2749 tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2750 }
2751 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2752 tmp_2->addTag(i->first);
2753 }
2754 // Compute a result for each tag
2755 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2756 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2764 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2765 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2766 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2767 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2768 }
2769
2770 }
2771 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2772
2773 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2774 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2775 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2776 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2777 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2778
2779 int sampleNo_0,dataPointNo_0;
2780 int numSamples_0 = arg_0_Z.getNumSamples();
2781 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2782 res.requireWrite();
2783 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2784 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2785 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2786 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2787 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2788 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2789 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2790 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2791 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2792 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2793 }
2794 }
2795
2796 }
2797 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2798 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2799 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2800 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2801 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2802
2803 int sampleNo_0,dataPointNo_0;
2804 int numSamples_0 = arg_0_Z.getNumSamples();
2805 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2806 int offset_1 = tmp_1->getPointOffset(0,0);
2807 res.requireWrite();
2808 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2809 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2810 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2811 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2812 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2813 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2814 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2815 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2816 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2817 }
2818 }
2819
2820
2821 }
2822 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2823
2824 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2825 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2826 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2827 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2828 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2829
2830 int sampleNo_0,dataPointNo_0;
2831 int numSamples_0 = arg_0_Z.getNumSamples();
2832 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2833 res.requireWrite();
2834 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2835 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2836 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2837 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2838 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2839 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2840 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2841 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2842 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2843 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2844 }
2845 }
2846
2847 }
2848 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2849
2850 // After finding a common function space above, the two inputs have the same numSamples and numDataPointsPerSample
2851 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2852 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2853 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2854 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2855
2856 int sampleNo_0,dataPointNo_0;
2857 int numSamples_0 = arg_0_Z.getNumSamples();
2858 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2859 res.requireWrite();
2860 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2861 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2862 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2863 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2864 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2865 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2866 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2867 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2868 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2869 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2870 }
2871 }
2872
2873 }
2874 else {
2875 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2876 }
2877
2878 } else {
2879 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
2880 }
2881
2882 return res;
2883 }
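// Illustrative sketch only (not part of this header): a typical call pairs two
// Data objects with a standard functor such as std::plus<double> from
// <functional>; the values and the FunctionSpace "fs" below are hypothetical.
//
//   Data a(1.0, DataTypes::ShapeType(), fs);   // scalar Data on fs
//   Data b(2.0, DataTypes::ShapeType(), fs);
//   Data c = C_TensorBinaryOperation(a, b, std::plus<double>());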
2884
2885 template <typename UnaryFunction>
2886 Data
2887 C_TensorUnaryOperation(Data const &arg_0,
2888 UnaryFunction operation)
2889 {
2890 if (arg_0.isEmpty()) // do this before we attempt to interpolate
2891 {
2892 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2893 }
2894 if (arg_0.isLazy())
2895 {
2896 throw DataException("Error - Operations not permitted on lazy data.");
2897 }
2898 // Interpolate if necessary and find an appropriate function space
2899 Data arg_0_Z = Data(arg_0);
2900
2901 // Get the shape and size of the input
2903 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
2904 int size0 = arg_0_Z.getDataPointSize();
2905
2906 // Declare output Data object
2907 Data res;
2908
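  // The result mirrors the storage scheme of the input: DataConstant is handled
  // with a single application, DataTagged applies the operation to the default
  // value and to each tag, and DataExpanded loops over every data point.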
2909 if (arg_0_Z.isConstant()) {
2910 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
2913 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2914 double *ptr_2 = &(res.getDataAtOffsetRW(0));
2915 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2916 }
2917 else if (arg_0_Z.isTagged()) {
2918
2919 // Borrow DataTagged input from Data object
2920 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2921
2922 // Prepare a DataTagged output 2
2923 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2924 res.tag();
2925 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2926
2927 // Get the pointers to the actual data
2928 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2929 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2930 // Compute a result for the default
2931 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2932 // Compute a result for each tag
2933 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2934 DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2935 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2936 tmp_2->addTag(i->first);
2937 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2938 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2939 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2940 }
2941
2942 }
2943 else if (arg_0_Z.isExpanded()) {
2944
2945 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
2946 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2947 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2948
2949 int sampleNo_0,dataPointNo_0;
2950 int numSamples_0 = arg_0_Z.getNumSamples();
2951 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2952 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2953 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2954 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2955 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2956 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2957 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2958 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2959 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
2960 }
2961 }
2962 }
2963 else {
2964 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
2965 }
2966
2967 return res;
2968 }
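// Illustrative sketch only (not part of this header): any callable taking and
// returning a double can be supplied; "a" is assumed to be an existing Data
// object and ::fabs is assumed to be visible at global scope.
//
//   Data b = C_TensorUnaryOperation(a, ::fabs);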
2969
2970 }
2971 #endif
