
Contents of /trunk/escript/src/DataExpanded.cpp



Revision 4286
Thu Mar 7 04:28:11 2013 UTC by caltinay
File size: 32574 byte(s)
Assorted spelling fixes.

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2013 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15
16
17 #include "Data.h"
18 #include "DataExpanded.h"
19 #include "DataException.h"
20 #include "DataConstant.h"
21 #include "DataTagged.h"
22 #include <limits>
23
24 #include "esysUtils/Esys_MPI.h"
25
26 #ifdef USE_NETCDF
27 #include <netcdfcpp.h>
28 #endif
29
30 #include <boost/python/extract.hpp>
31 #include "DataMaths.h"
32
33 //#define MKLRANDOM
34
35 #ifdef MKLRANDOM
36 #include <mkl_vsl.h>
37 #else
38 #include <boost/random/mersenne_twister.hpp>
39 #endif
40
41 using namespace std;
42 using namespace boost::python;
43 using namespace boost;
44 using namespace escript::DataTypes;
45
46
47 // #define CHECK_FOR_EX_WRITE if (!checkNoSharing()) {throw DataException("Attempt to modify shared object");}
48
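// CHECK_FOR_EX_WRITE enforces exclusive write access: any method about to modify this
// object's data throws a DataException if the underlying storage is shared with another
// Data object (i.e. checkNoSharing() returns false).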
49 #define CHECK_FOR_EX_WRITE if (!checkNoSharing()) {std::ostringstream ss; ss << "Attempt to modify shared object. line " << __LINE__ << " of " << __FILE__; throw DataException(ss.str());}
50
51 namespace escript {
52
53 DataExpanded::DataExpanded(const WrappedArray& value,
54 const FunctionSpace& what)
55 : parent(what,value.getShape())
56 {
57 //
58 // initialise the data array for this object
59 initialise(what.getNumSamples(),what.getNumDPPSample());
60 //
61 // copy the given value to every data point
62 copy(value);
63 }
64
65 DataExpanded::DataExpanded(const DataExpanded& other)
66 : parent(other.getFunctionSpace(), other.getShape()),
67 m_data(other.m_data)
68 {
69 }
70
71 DataExpanded::DataExpanded(const DataConstant& other)
72 : parent(other.getFunctionSpace(), other.getShape())
73 {
74 //
75 // initialise the data array for this object
76 initialise(other.getNumSamples(),other.getNumDPPSample());
77 //
78 // DataConstant only has one value, copy this to every data point
79 copy(other);
80 }
81
82 DataExpanded::DataExpanded(const DataTagged& other)
83 : parent(other.getFunctionSpace(), other.getShape())
84 {
85 //
86 // initialise the data array for this object
87 initialise(other.getNumSamples(),other.getNumDPPSample());
88 //
89 // for each data point in this object, extract and copy the corresponding data
90 // value from the given DataTagged object
91 int i,j;
92 DataTypes::ValueType::size_type numRows=m_data.getNumRows();
93 DataTypes::ValueType::size_type numCols=m_data.getNumCols();
94 #pragma omp parallel for private(i,j) schedule(static)
95 for (i=0;i<numRows;i++) {
96 for (j=0;j<numCols;j++) {
97 try {
98 DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(),
99 other.getVectorRO(),
100 other.getPointOffset(i,j));
101 }
102 catch (std::exception& e) {
103 cout << e.what() << endl;
104 }
105 }
106 }
107 }
108
109 DataExpanded::DataExpanded(const DataExpanded& other,
110 const DataTypes::RegionType& region)
111 : parent(other.getFunctionSpace(),DataTypes::getResultSliceShape(region))
112 {
113 //
114 // get the shape of the slice
115 // DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
116 //
117 // initialise this Data object to the shape of the slice
118 initialise(other.getNumSamples(),other.getNumDPPSample());
119 //
120 // copy the data
121 DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
122 DataTypes::ValueType::size_type numRows=m_data.getNumRows();
123 DataTypes::ValueType::size_type numCols=m_data.getNumCols();
124 int i,j;
125 #pragma omp parallel for private(i,j) schedule(static)
126 for (i=0;i<numRows;i++) {
127 for (j=0;j<numCols;j++) {
128 try {
129 DataTypes::copySlice(getVectorRW(),getShape(),getPointOffset(i,j),
130 other.getVectorRO(),
131 other.getShape(),
132 other.getPointOffset(i,j),
133 region_loop_range);
134 }
135 catch (std::exception& e) {
136 cout << e.what() << endl;
137 }
138 }
139 }
140 }
141
142 DataExpanded::DataExpanded(const FunctionSpace& what,
143 const DataTypes::ShapeType &shape,
144 const DataTypes::ValueType &data)
145 : parent(what,shape)
146 {
147 EsysAssert(data.size()%getNoValues()==0,
148 "DataExpanded Constructor - size of supplied data is not a multiple of shape size.");
149
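// If the supplied data holds exactly one data point worth of values, replicate that
// point to every data point in this object; otherwise the data is assumed to already
// contain one set of values per data point and is copied wholesale.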
150 if (data.size()==getNoValues())
151 {
152 ValueType& vec=m_data.getData();
153 //
154 // create the view of the data
155 initialise(what.getNumSamples(),what.getNumDPPSample());
156 // now we copy this value to all elements
157 for (int i=0;i<getLength();)
158 {
159 for (unsigned int j=0;j<getNoValues();++j,++i)
160 {
161 vec[i]=data[j];
162 }
163 }
164 }
165 else
166 {
167 //
168 // copy the data in the correct format
169 m_data.getData()=data;
170 }
171
172
173 }
174
175 DataExpanded::DataExpanded(const FunctionSpace& what,
176 const DataTypes::ShapeType &shape,
177 const double v)
178 : parent(what,shape)
179 {
180 ValueType& vec=m_data.getData();
181 //
182 // create the view of the data
183 initialise(what.getNumSamples(),what.getNumDPPSample());
184 // now we copy this value to all elements
185 const int L=getLength();
186 int i;
187 #pragma omp parallel for schedule(static) private(i)
188 for (i=0;i<L;++i)
189 {
190 vec[i]=v;
191 }
192 }
193
194
195
196 DataExpanded::~DataExpanded()
197 {
198 }
199
200 DataAbstract*
201 DataExpanded::deepCopy()
202 {
203 return new DataExpanded(*this);
204 }
205
206
207 DataAbstract*
208 DataExpanded::getSlice(const DataTypes::RegionType& region) const
209 {
210 return new DataExpanded(*this,region);
211 }
212
213 void
214 DataExpanded::setSlice(const DataAbstract* value,
215 const DataTypes::RegionType& region)
216 {
217 const DataExpanded* tempDataExp=dynamic_cast<const DataExpanded*>(value);
218 if (tempDataExp==0) {
219 throw DataException("Programming error - casting to DataExpanded.");
220 }
221 CHECK_FOR_EX_WRITE
222 //
223 // get shape of slice
224 DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
225 DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
226 //
227 // check shape
228 if (getRank()!=region.size()) {
229 throw DataException("Error - Invalid slice region.");
230 }
231 if (tempDataExp->getRank()>0 && !DataTypes::checkShape(value->getShape(), shape)) {
232 throw DataException (DataTypes::createShapeErrorMessage(
233 "Error - Couldn't copy slice due to shape mismatch.",shape, value->getShape()));
234 }
235 //
236 // copy the data from the slice into this object
237 DataTypes::ValueType::size_type numRows=m_data.getNumRows();
238 DataTypes::ValueType::size_type numCols=m_data.getNumCols();
239 int i, j;
240 ValueType& vec=getVectorRW();
241 const ShapeType& mshape=getShape();
242 const ValueType& tVec=tempDataExp->getVectorRO();
243 const ShapeType& tShape=tempDataExp->getShape();
244 #pragma omp parallel for private(i,j) schedule(static)
245 for (i=0;i<numRows;i++) {
246 for (j=0;j<numCols;j++) {
247 DataTypes::copySliceFrom(vec,mshape,getPointOffset(i,j),
248 tVec,
249 tShape,
250 tempDataExp->getPointOffset(i,j),
251 region_loop_range);
252
253 }
254 }
255 }
256
257 void
258 DataExpanded::copy(const DataConstant& value)
259 {
260 EsysAssert((checkShape(getShape(), value.getShape())),
261 createShapeErrorMessage("Error - Couldn't copy due to shape mismatch.",value.getShape(),getShape()));
262
263 //
264 // copy a single value to every data point in this object
265 int nRows=m_data.getNumRows();
266 int nCols=m_data.getNumCols();
267 int i,j;
268 #pragma omp parallel for private(i,j) schedule(static)
269 for (i=0;i<nRows;i++) {
270 for (j=0;j<nCols;j++) {
271 DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(), value.getVectorRO(), 0);
272 }
273 }
274 }
275
276 void
277 DataExpanded::copy(const WrappedArray& value)
278 {
279 // check the input shape matches this shape
280 if (!DataTypes::checkShape(getShape(),value.getShape())) {
281 throw DataException(DataTypes::createShapeErrorMessage(
282 "Error - (DataExpanded) Cannot copy due to shape mismatch.",
283 value.getShape(),getShape()));
284 }
285 getVectorRW().copyFromArray(value, getNumDPPSample()*getNumSamples());
286 }
287
288
289 void
290 DataExpanded::initialise(int noSamples,
291 int noDataPointsPerSample)
292 {
293 if (noSamples==0) //retain the default empty object
294 {
295 return;
296 }
297 //
298 // resize data array to the required size
299 m_data.resize(noSamples,noDataPointsPerSample,getNoValues());
300 }
301
302 bool
303 DataExpanded::hasNaN() const
304 {
305 const ValueType& v=m_data.getData();
306 for (ValueType::size_type i=0;i<v.size();++i)
307 {
308 if (nancheck(v[i]))
309 {
310 return true;
311 }
312 }
313 return false;
314 }
315
316
317 string
318 DataExpanded::toString() const
319 {
320 stringstream temp;
321 FunctionSpace fs=getFunctionSpace();
322
323 int offset=0;
324 for (int i=0;i<m_data.getNumRows();i++) {
325 for (int j=0;j<m_data.getNumCols();j++) {
326 offset=getPointOffset(i,j);
327 stringstream suffix;
328 suffix << "( id: " << i << ", ref: " << fs.getReferenceIDOfSample(i) << ", pnt: " << j << ")";
329 temp << DataTypes::pointToString(getVectorRO(),getShape(),offset,suffix.str());
330 if (!(i==(m_data.getNumRows()-1) && j==(m_data.getNumCols()-1))) {
331 temp << endl;
332 }
333 }
334 }
335 string result=temp.str();
336 if (result.empty())
337 {
338 return "(data contains no samples)\n";
339 }
340 return result;
341 }
342
343 DataTypes::ValueType::size_type
344 DataExpanded::getPointOffset(int sampleNo,
345 int dataPointNo) const
346 {
347 return m_data.index(sampleNo,dataPointNo);
348 }
349
350 DataTypes::ValueType::size_type
351 DataExpanded::getPointOffset(int sampleNo,
352 int dataPointNo)
353 {
354 return m_data.index(sampleNo,dataPointNo);
355 }
356
357 DataTypes::ValueType::size_type
358 DataExpanded::getLength() const
359 {
360 return m_data.size();
361 }
362
363
364
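// Set every component of the data point (sampleNo, dataPointNo) to the given scalar;
// the rank-specific loops below simply enumerate all indices for shapes of rank 0 to 4.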
365 void
366 DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const double value) {
367 CHECK_FOR_EX_WRITE
368 //
369 // Get the number of samples and data-points per sample.
370 int numSamples = getNumSamples();
371 int numDataPointsPerSample = getNumDPPSample();
372 int dataPointRank = getRank();
373 ShapeType dataPointShape = getShape();
374 if (numSamples*numDataPointsPerSample > 0) {
375 //TODO: global error handling
376 if ((sampleNo >= numSamples) || (sampleNo < 0 )) {
377 throw DataException("Error - DataExpanded::copyDataPoint invalid sampleNo.");
378 }
379 if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
380 throw DataException("Error - DataExpanded::copyDataPoint invalid dataPointNoInSample.");
381 }
382 ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
383 ValueType& vec=getVectorRW();
384 if (dataPointRank==0) {
385 vec[offset]=value;
386 } else if (dataPointRank==1) {
387 for (int i=0; i<dataPointShape[0]; i++) {
388 vec[offset+i]=value;
389 }
390 } else if (dataPointRank==2) {
391 for (int i=0; i<dataPointShape[0]; i++) {
392 for (int j=0; j<dataPointShape[1]; j++) {
393 vec[offset+getRelIndex(dataPointShape,i,j)]=value;
394 }
395 }
396 } else if (dataPointRank==3) {
397 for (int i=0; i<dataPointShape[0]; i++) {
398 for (int j=0; j<dataPointShape[1]; j++) {
399 for (int k=0; k<dataPointShape[2]; k++) {
400 vec[offset+getRelIndex(dataPointShape,i,j,k)]=value;
401 }
402 }
403 }
404 } else if (dataPointRank==4) {
405 for (int i=0; i<dataPointShape[0]; i++) {
406 for (int j=0; j<dataPointShape[1]; j++) {
407 for (int k=0; k<dataPointShape[2]; k++) {
408 for (int l=0; l<dataPointShape[3]; l++) {
409 vec[offset+getRelIndex(dataPointShape,i,j,k,l)]=value;
410 }
411 }
412 }
413 }
414 }
415 }
416 }
417
418 void
419 DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const WrappedArray& value) {
420 CHECK_FOR_EX_WRITE
421 //
422 // Get the number of samples and data-points per sample.
423 int numSamples = getNumSamples();
424 int numDataPointsPerSample = getNumDPPSample();
425 //
426 // check rank:
427 if (value.getRank()!=getRank())
428 throw DataException("Rank of value does not match Data object rank");
429 if (numSamples*numDataPointsPerSample > 0) {
430 //TODO: global error handling
431 if ((sampleNo >= numSamples) || (sampleNo < 0 )) {
432 throw DataException("Error - DataExpanded::copyDataPoint invalid sampleNo.");
433 }
434 if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
435 throw DataException("Error - DataExpanded::copyDataPoint invalid dataPointNoInSample.");
436 }
437 ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
438 ValueType& vec=getVectorRW();
439 vec.copyFromArrayToOffset(value,offset,1);
440 }
441 }
442
443 void
444 DataExpanded::symmetric(DataAbstract* ev)
445 {
446 int sampleNo,dataPointNo;
447 int numSamples = getNumSamples();
448 int numDataPointsPerSample = getNumDPPSample();
449 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
450 if (temp_ev==0) {
451 throw DataException("Error - DataExpanded::symmetric: casting to DataExpanded failed (probably a programming error).");
452 }
453 const ValueType& vec=getVectorRO();
454 const ShapeType& shape=getShape();
455 ValueType& evVec=temp_ev->getVectorRW();
456 const ShapeType& evShape=temp_ev->getShape();
457 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
458 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
459 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
460 DataMaths::symmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
461 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
462 }
463 }
464 }
465 void
466 DataExpanded::nonsymmetric(DataAbstract* ev)
467 {
468 int sampleNo,dataPointNo;
469 int numSamples = getNumSamples();
470 int numDataPointsPerSample = getNumDPPSample();
471 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
472 if (temp_ev==0) {
473 throw DataException("Error - DataExpanded::nonsymmetric: casting to DataExpanded failed (probably a programming error).");
474 }
475 const ValueType& vec=getVectorRO();
476 const ShapeType& shape=getShape();
477 ValueType& evVec=temp_ev->getVectorRW();
478 const ShapeType& evShape=temp_ev->getShape();
479 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
480 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
481 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
482 DataMaths::nonsymmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
483 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
484 }
485 }
486 }
487 void
488 DataExpanded::trace(DataAbstract* ev, int axis_offset)
489 {
490 int sampleNo,dataPointNo;
491 int numSamples = getNumSamples();
492 int numDataPointsPerSample = getNumDPPSample();
493 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
494 if (temp_ev==0) {
495 throw DataException("Error - DataExpanded::trace: casting to DataExpanded failed (probably a programming error).");
496 }
497 const ValueType& vec=getVectorRO();
498 const ShapeType& shape=getShape();
499 ValueType& evVec=temp_ev->getVectorRW();
500 const ShapeType& evShape=temp_ev->getShape();
501 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
502 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
503 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
504 DataMaths::trace(vec,shape,getPointOffset(sampleNo,dataPointNo),
505 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
506 }
507 }
508 }
509
510 void
511 DataExpanded::transpose(DataAbstract* ev, int axis_offset)
512 {
513 int sampleNo,dataPointNo;
514 int numSamples = getNumSamples();
515 int numDataPointsPerSample = getNumDPPSample();
516 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
517 if (temp_ev==0) {
518 throw DataException("Error - DataExpanded::transpose: casting to DataExpanded failed (probably a programming error).");
519 }
520 const ValueType& vec=getVectorRO();
521 const ShapeType& shape=getShape();
522 ValueType& evVec=temp_ev->getVectorRW();
523 const ShapeType& evShape=temp_ev->getShape();
524 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
525 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
526 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
527 DataMaths::transpose(vec,shape,getPointOffset(sampleNo,dataPointNo),
528 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
529 }
530 }
531 }
532
533 void
534 DataExpanded::swapaxes(DataAbstract* ev, int axis0, int axis1)
535 {
536 int sampleNo,dataPointNo;
537 int numSamples = getNumSamples();
538 int numDataPointsPerSample = getNumDPPSample();
539 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
540 if (temp_ev==0) {
541 throw DataException("Error - DataExpanded::swapaxes: casting to DataExpanded failed (probably a programming error).");
542 }
543 const ValueType& vec=getVectorRO();
544 const ShapeType& shape=getShape();
545 ValueType& evVec=temp_ev->getVectorRW();
546 const ShapeType& evShape=temp_ev->getShape();
547 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
548 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
549 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
550 DataMaths::swapaxes(vec,shape,getPointOffset(sampleNo,dataPointNo),
551 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis0,axis1);
552 }
553 }
554 }
555 void
556 DataExpanded::eigenvalues(DataAbstract* ev)
557 {
558 int sampleNo,dataPointNo;
559 int numSamples = getNumSamples();
560 int numDataPointsPerSample = getNumDPPSample();
561 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
562 if (temp_ev==0) {
563 throw DataException("Error - DataExpanded::eigenvalues: casting to DataExpanded failed (probably a programming error).");
564 }
565 const ValueType& vec=getVectorRO();
566 const ShapeType& shape=getShape();
567 ValueType& evVec=temp_ev->getVectorRW();
568 const ShapeType& evShape=temp_ev->getShape();
569 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
570 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
571 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
572 DataMaths::eigenvalues(vec,shape,getPointOffset(sampleNo,dataPointNo),
573 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
574 }
575 }
576 }
577 void
578 DataExpanded::eigenvalues_and_eigenvectors(DataAbstract* ev,DataAbstract* V,const double tol)
579 {
580 int numSamples = getNumSamples();
581 int numDataPointsPerSample = getNumDPPSample();
582 int sampleNo,dataPointNo;
583 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
584 if (temp_ev==0) {
585 throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (probably a programming error).");
586 }
587 DataExpanded* temp_V=dynamic_cast<DataExpanded*>(V);
588 if (temp_V==0) {
589 throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (probably a programming error).");
590 }
591 const ValueType& vec=getVectorRO();
592 const ShapeType& shape=getShape();
593 ValueType& evVec=temp_ev->getVectorRW();
594 const ShapeType& evShape=temp_ev->getShape();
595 ValueType& VVec=temp_V->getVectorRW();
596 const ShapeType& VShape=temp_V->getShape();
597 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
598 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
599 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
600 DataMaths::eigenvalues_and_eigenvectors(vec,shape,getPointOffset(sampleNo,dataPointNo),
601 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),
602 VVec, VShape,V->getPointOffset(sampleNo,dataPointNo),tol);
603 }
604 }
605 }
606
607
608 int
609 DataExpanded::matrixInverse(DataAbstract* out) const
610 {
611 DataExpanded* temp=dynamic_cast<DataExpanded*>(out);
612 if (temp==0)
613 {
614 throw DataException("Error - DataExpanded::matrixInverse: casting to DataExpanded failed (probably a programming error).");
615 }
616
617 if (getRank()!=2)
618 {
619 throw DataException("Error - DataExpanded::matrixInverse: input must be rank 2.");
620
621 }
622 int sampleNo;
623 const int numdpps=getNumDPPSample();
624 const int numSamples = getNumSamples();
625 const ValueType& vec=m_data.getData();
626 int errcode=0;
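// Each thread gets its own LAPACK workspace (LapackInverseHelper) and tracks its own
// worst error code; a nonzero code is published to the shared errcode inside the
// critical section below.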
627 #pragma omp parallel private(sampleNo)
628 {
629 int errorcode=0;
630 LapackInverseHelper h(getShape()[0]);
631 #pragma omp for schedule(static)
632 for (sampleNo = 0; sampleNo < numSamples; sampleNo++)
633 {
634 // not sure I like all those virtual calls to getPointOffset
635 DataTypes::ValueType::size_type offset=getPointOffset(sampleNo,0);
636 int res=DataMaths::matrix_inverse(vec, getShape(), offset, temp->getVectorRW(), temp->getShape(), offset, numdpps, h);
637 if (res>errorcode)
638 {
639 errorcode=res;
640 #pragma omp critical
641 {
642 errcode=errorcode; // I'm not especially concerned which error gets reported as long as one is
643 }
644 }
645 }
646 }
647 if (errcode)
648 {
649 DataMaths::matrixInverseError(errcode); // throws exceptions
650 }
651 return errcode;
652 }
653
654 void
655 DataExpanded::setToZero(){
656 // TODO: is there a more efficient way to do this?
657 // A plain memset is avoided deliberately: zeroing inside the parallel loop ensures each
658 // thread first touches (and therefore places) the memory it will later work on.
659 CHECK_FOR_EX_WRITE
660 int numSamples = getNumSamples();
661 int numDataPointsPerSample = getNumDPPSample();
662 DataTypes::ValueType::size_type n = getNoValues();
663 double* p;
664 int sampleNo,dataPointNo, i;
665 #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
666 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
667 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
668 p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
669 for (i=0; i<n ;++i) p[i]=0.;
670 }
671 }
672 }
673
674 /* Append MPI rank to file name if multiple MPI processes */
675 char *Escript_MPI_appendRankToFileName(const char *fileName, int mpi_size, int mpi_rank) {
676 /* Make plenty of room for the mpi_rank number and terminating '\0' */
677 char *newFileName = (char *)malloc(strlen(fileName)+20);
678 strncpy(newFileName, fileName, strlen(fileName)+1);
679 if (mpi_size>1) sprintf(newFileName+strlen(newFileName), ".%04d", mpi_rank);
680 return(newFileName);
681 }
682
683 void
684 DataExpanded::dump(const std::string fileName) const
685 {
686 #ifdef USE_NETCDF
687 const int ldims=2+DataTypes::maxRank;
688 const NcDim* ncdims[ldims];
689 NcVar *var, *ids;
690 int rank = getRank();
691 int type= getFunctionSpace().getTypeCode();
692 int ndims =0;
693 long dims[ldims];
694 const double* d_ptr=&(m_data[0]);
695 const DataTypes::ShapeType& shape = getShape();
696 int mpi_iam=getFunctionSpace().getDomain()->getMPIRank();
697 int mpi_num=getFunctionSpace().getDomain()->getMPISize();
698 #ifdef ESYS_MPI
699 MPI_Status status;
700 #endif
701
702 #ifdef ESYS_MPI
703 /* Serialize NetCDF I/O */
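/* Each rank waits for a zero-length token from rank-1, writes its own file, then
   passes the token on to rank+1 via the matching MPI_Send below, so only one rank
   performs netCDF output at a time. */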
704 if (mpi_iam>0) MPI_Recv(&ndims, 0, MPI_INT, mpi_iam-1, 81801, MPI_COMM_WORLD, &status);
705 #endif
706
707 // netCDF error handler
708 NcError err(NcError::verbose_nonfatal);
709 // Create the file.
710 char *newFileName = Escript_MPI_appendRankToFileName(fileName.c_str(), mpi_num, mpi_iam);
711 NcFile dataFile(newFileName, NcFile::Replace);
712 // check if writing was successful
713 if (!dataFile.is_valid())
714 throw DataException("Error - DataExpanded:: opening of netCDF file for output failed.");
715 if (!dataFile.add_att("type_id",2) )
716 throw DataException("Error - DataExpanded:: appending data type to netCDF file failed.");
717 if (!dataFile.add_att("rank",rank) )
718 throw DataException("Error - DataExpanded:: appending rank attribute to netCDF file failed.");
719 if (!dataFile.add_att("function_space_type",type))
720 throw DataException("Error - DataExpanded:: appending function space attribute to netCDF file failed.");
721 ndims=rank+2;
722 if ( rank >0 ) {
723 dims[0]=shape[0];
724 if (! (ncdims[0] = dataFile.add_dim("d0",shape[0])) )
725 throw DataException("Error - DataExpanded:: appending ncdimension 0 to netCDF file failed.");
726 }
727 if ( rank >1 ) {
728 dims[1]=shape[1];
729 if (! (ncdims[1] = dataFile.add_dim("d1",shape[1])) )
730 throw DataException("Error - DataExpanded:: appending ncdimension 1 to netCDF file failed.");
731 }
732 if ( rank >2 ) {
733 dims[2]=shape[2];
734 if (! (ncdims[2] = dataFile.add_dim("d2", shape[2])) )
735 throw DataException("Error - DataExpanded:: appending ncdimension 2 to netCDF file failed.");
736 }
737 if ( rank >3 ) {
738 dims[3]=shape[3];
739 if (! (ncdims[3] = dataFile.add_dim("d3", shape[3])) )
740 throw DataException("Error - DataExpanded:: appending ncdimension 3 to netCDF file failed.");
741 }
742 dims[rank]=getFunctionSpace().getNumDataPointsPerSample();
743 if (! (ncdims[rank] = dataFile.add_dim("num_data_points_per_sample", dims[rank])) )
744 throw DataException("Error - DataExpanded:: appending num_data_points_per_sample to netCDF file failed.");
745 dims[rank+1]=getFunctionSpace().getNumSamples();
746 if (! (ncdims[rank+1] = dataFile.add_dim("num_samples", dims[rank+1])) )
747 throw DataException("Error - DataExpanded:: appending num_samples to netCDF file failed.");
748
749 if (getFunctionSpace().getNumSamples()>0)
750 {
751
752 if (! ( ids = dataFile.add_var("id", ncInt, ncdims[rank+1])) )
753 throw DataException("Error - DataExpanded:: appending reference id to netCDF file failed.");
754 const int* ids_p=getFunctionSpace().borrowSampleReferenceIDs();
755 if (! (ids->put(ids_p,dims[rank+1])) )
756 throw DataException("Error - DataExpanded:: copy reference id to netCDF buffer failed.");
757 if (! ( var = dataFile.add_var("data", ncDouble, ndims, ncdims)) )
758 throw DataException("Error - DataExpanded:: appending variable to netCDF file failed.");
759 if (! (var->put(d_ptr,dims)) )
760 throw DataException("Error - DataExpanded:: copy data to netCDF buffer failed.");
761 }
762 #ifdef ESYS_MPI
763 if (mpi_iam<mpi_num-1) MPI_Send(&ndims, 0, MPI_INT, mpi_iam+1, 81801, MPI_COMM_WORLD);
764 #endif
765 #else
766 throw DataException("Error - DataExpanded:: dump is not configured with netCDF. Please contact your installation manager.");
767 #endif
768 }
769
770 void
771 DataExpanded::setTaggedValue(int tagKey,
772 const DataTypes::ShapeType& pointshape,
773 const DataTypes::ValueType& value,
774 int dataOffset)
775 {
776 CHECK_FOR_EX_WRITE
777 int numSamples = getNumSamples();
778 int numDataPointsPerSample = getNumDPPSample();
779 int sampleNo,dataPointNo, i;
780 DataTypes::ValueType::size_type n = getNoValues();
781 double* p;
782 const double* in=&value[0+dataOffset];
783
784 if (value.size() != n) {
785 throw DataException("Error - DataExpanded::setTaggedValue: number of input values does not match number of values per data points.");
786 }
787
788 #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
789 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
790 if (getFunctionSpace().getTagFromSampleNo(sampleNo) == tagKey ) {
791 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
792 p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
793 for (i=0; i<n ;++i) p[i]=in[i];
794 }
795 }
796 }
797 }
798
799
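// Permute the sample data (and the reference_ids array) in place so that reference_ids
// matches the reference ids reported by this object's FunctionSpace: whenever entry
// sampleNo disagrees, the matching sample is located further down the array and the two
// sample blocks are swapped; an exception is thrown if no match can be found.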
800 void
801 DataExpanded::reorderByReferenceIDs(int *reference_ids)
802 {
803 CHECK_FOR_EX_WRITE
804 int numSamples = getNumSamples();
805 DataTypes::ValueType::size_type n = getNoValues() * getNumDPPSample();
806 int sampleNo, sampleNo2,i;
807 double* p,*p2;
808 register double rtmp;
809 FunctionSpace fs=getFunctionSpace();
810
811 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
812 const int id_in=reference_ids[sampleNo];
813 const int id=fs.getReferenceIDOfSample(sampleNo);
814 if (id!=id_in) {
815 bool matched=false;
816 for (sampleNo2 = sampleNo+1; sampleNo2 < numSamples; sampleNo2++) {
817 if (id == reference_ids[sampleNo2]) {
818 p=&(m_data[getPointOffset(sampleNo,0)]);
819 p2=&(m_data[getPointOffset(sampleNo2,0)]);
820 for (i=0; i<n ;i++) {
821 rtmp=p[i];
822 p[i]=p2[i];
823 p2[i]=rtmp;
824 }
825 reference_ids[sampleNo]=id;
826 reference_ids[sampleNo2]=id_in;
827 matched=true;
828 break;
829 }
830 }
831 if (! matched) {
832 throw DataException("Error - DataExpanded::reorderByReferenceIDs: unable to reorder sample data by reference ids");
833 }
834 }
835 }
836 }
837
838 DataTypes::ValueType&
839 DataExpanded::getVectorRW()
840 {
841 CHECK_FOR_EX_WRITE
842 return m_data.getData();
843 }
844
845 const DataTypes::ValueType&
846 DataExpanded::getVectorRO() const
847 {
848 return m_data.getData();
849 }
850
851
852 #ifndef MKLRANDOM
853 namespace {
854
855 boost::mt19937 base; // used to seed all the other generators
856 vector<boost::mt19937> gens;
857
858
859 void seedGens(long seed)
860 {
861 #ifdef _OPENMP
862 int numthreads=omp_get_max_threads();
863 #else
864 int numthreads=1;
865 #endif
866 if (gens.size()==0) // we haven't instantiated the generators yet
867 {
868 gens.resize(numthreads); // yes this means all the generators will be owned by one thread in a NUMA sense
869 } // I don't think it is worth a more complicated solution at this point
870 if (seed!=0)
871 {
872 base.seed((uint32_t)seed); // without this cast, icc gets confused
873 for (int i=0;i<numthreads;++i)
874 {
875 uint32_t b=base();
876 gens[i].seed(b); // initialise each generator with successive random values
877 }
878 }
879 }
880
881
882 }
883 #endif
884
885 // Idea here is to create an array of seeds by feeding the original seed into the random generator
886 // The code at the beginning of the function, which computes a seed when none is supplied,
887 // is just meant to introduce some variety (and ensure that multiple ranks don't get the same seed).
888 // I make no claim about how well these initial seeds are distributed
889 void DataExpanded::randomFill(long seed)
890 {
891 CHECK_FOR_EX_WRITE
892 static unsigned prevseed=0; // So if we create a bunch of objects we don't get the same start seed
893 if (seed==0) // for each one
894 {
895 if (prevseed==0)
896 {
897 time_t s=time(0);
898 seed=s;
899 }
900 else
901 {
902 seed=prevseed+419; // these numbers are arbitrary
903 if (seed>3040101) // I want to avoid overflow on 32bit systems
904 {
905 seed=((int)(seed)%0xABCD)+1;
906 }
907 }
908 }
909 // now we need to consider MPI since we don't want each rank to start with the same seed
910 seed+=getFunctionSpace().getDomain()->getMPIRank()*getFunctionSpace().getDomain()->getMPISize()*3;
911 prevseed=seed;
912
913 #ifdef MKLRANDOM
914
915 #ifdef _OPENMP
916 int numthreads=omp_get_max_threads();
917 #else
918 int numthreads=1;
919 #endif
920 double* seeds=new double[numthreads];
921 VSLStreamStatePtr sstream;
922
923 int status=vslNewStream(&sstream, VSL_BRNG_MT19937, seed); // use a Mersenne Twister
924 numeric_limits<double> dlim;
925 vdRngUniform(VSL_METHOD_DUNIFORM_STD, sstream , numthreads, seeds, -1, 1);
926 vslDeleteStream(&sstream);
927 DataVector& dv=getVectorRW();
928 size_t dvsize=dv.size();
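// Split the vector into one contiguous chunk per thread: every thread except the last
// fills 'bigchunk' entries from its own MKL stream, and the last thread fills the remainder.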
929 #pragma omp parallel
930 {
931 int tnum=0;
932 #ifdef _OPENMP
933 tnum=omp_get_thread_num();
934 #endif
935 VSLStreamStatePtr stream;
936 // the 12345 is a hack to give us a better chance of getting different integer seeds.
937 int status=vslNewStream(&stream, VSL_BRNG_MT19937, seeds[tnum]*12345); // use a Mersenne Twister
938 int bigchunk=(dvsize/numthreads+1);
939 int smallchunk=dvsize-bigchunk*(numthreads-1);
940 int chunksize=(tnum<(numthreads-1))?bigchunk:smallchunk;
941 vdRngUniform(VSL_METHOD_DUNIFORM_STD, stream, chunksize, &(dv[bigchunk*tnum]), 0,1);
942 vslDeleteStream(&stream);
943 }
944 delete[] seeds;
945 #else
946 boost::mt19937::result_type RMAX=base.max();
947 seedGens(seed);
948 DataVector& dv=getVectorRW();
949 long i;
950 const size_t dvsize=dv.size();
951
952 #pragma omp parallel private(i)
953 {
954 int tnum=0;
955 #ifdef _OPENMP
956 tnum=omp_get_thread_num();
957 #endif
958 boost::mt19937& generator=gens[tnum];
959
960 #pragma omp for schedule(static)
961 for (i=0;i<dvsize;++i)
962 {
968 dv[i]=((double)generator())/RMAX; // scale the generator output to [0,1]
973 }
974 }
975 #endif
976 }
977
978 } // end of namespace

Properties

svn:eol-style: native
svn:keywords: Author Date Id Revision
