/[escript]/trunk/escript/src/DataExpanded.cpp
ViewVC logotype

Contents of /trunk/escript/src/DataExpanded.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3675 - (show annotations)
Thu Nov 17 00:53:38 2011 UTC (7 years, 11 months ago) by jfenwick
File size: 32778 byte(s)
pasowrap joins the trunk.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2010 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16 #include "DataExpanded.h"
17 #include "DataException.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include <limits>
21
22 #include "esysUtils/Esys_MPI.h"
23
24 #ifdef USE_NETCDF
25 #include <netcdfcpp.h>
26 #endif
27
28 #include <boost/python/extract.hpp>
29 #include "DataMaths.h"
30
31 //#define MKLRANDOM
32
33 #ifdef MKLRANDOM
34 #include <mkl_vsl.h>
35 #else
36 #include <boost/random/mersenne_twister.hpp>
37 #endif
38
39 using namespace std;
40 using namespace boost::python;
41 using namespace boost;
42 using namespace escript::DataTypes;
43
44
// #define CHECK_FOR_EX_WRITE if (!checkNoSharing()) {throw DataException("Attempt to modify shared object");}

// Guard used before any mutation of m_data: if the underlying DataVector is
// shared with other Data objects, refuse to modify it in place.
// NOTE: the previous version contained a deliberate null-pointer write
// (*((int*)0)=17;) before the throw — a debugging crash left in by mistake.
// It made every sharing violation a segfault instead of a catchable exception.
#define CHECK_FOR_EX_WRITE if (!checkNoSharing()) {std::ostringstream ss; ss << " Attempt to modify shared object. line " << __LINE__ << " of " << __FILE__; throw DataException(ss.str());}
48
49 namespace escript {
50
// Construct an expanded Data object from a (python-)wrapped array:
// storage is allocated for every data point of the FunctionSpace and the
// given value is replicated into each of them.
DataExpanded::DataExpanded(const WrappedArray& value,
                           const FunctionSpace& what)
  : parent(what,value.getShape())
{
  //
  // initialise the data array for this object
  initialise(what.getNumSamples(),what.getNumDPPSample());
  //
  // copy the given value to every data point
  copy(value);
}
62
// Copy constructor: duplicates the other object's function space, shape and
// the full underlying data array (m_data is copied, not shared).
DataExpanded::DataExpanded(const DataExpanded& other)
  : parent(other.getFunctionSpace(), other.getShape()),
    m_data(other.m_data)
{
}
68
// Promote a DataConstant to expanded storage: allocate one block per data
// point, then replicate the constant's single value everywhere.
DataExpanded::DataExpanded(const DataConstant& other)
  : parent(other.getFunctionSpace(), other.getShape())
{
  //
  // initialise the data array for this object
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // DataConstant only has one value, copy this to every data point
  copy(other);
}
79
// Promote a DataTagged object to expanded storage: every (sample, point)
// slot receives the value the tagged object associates with that slot.
DataExpanded::DataExpanded(const DataTagged& other)
  : parent(other.getFunctionSpace(), other.getShape())
{
  //
  // initialise the data array for this object
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // for each data point in this object, extract and copy the corresponding data
  // value from the given DataTagged object
  int i,j;
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      try {
        DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(),
                             other.getVectorRO(),
                             other.getPointOffset(i,j));
      }
      // Exceptions must not escape an OpenMP parallel region, so errors are
      // reported to stdout rather than rethrown here.
      catch (std::exception& e) {
        cout << e.what() << endl;
      }
    }
  }
}
106
// Slice constructor: builds a new expanded object whose shape is the shape
// of `region` sliced out of `other`, and copies the sliced values for every
// data point.
DataExpanded::DataExpanded(const DataExpanded& other,
                           const DataTypes::RegionType& region)
  : parent(other.getFunctionSpace(),DataTypes::getResultSliceShape(region))
{
  //
  // get the shape of the slice
  // DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
  //
  // initialise this Data object to the shape of the slice
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // copy the data
  DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  int i,j;
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      try {
        DataTypes::copySlice(getVectorRW(),getShape(),getPointOffset(i,j),
                             other.getVectorRO(),
                             other.getShape(),
                             other.getPointOffset(i,j),
                             region_loop_range);
      }
      // can't throw across an OpenMP region boundary; report and continue
      catch (std::exception& e) {
        cout << e.what() << endl;
      }
    }
  }
}
139
// Construct from a raw value vector. Two cases:
//  * data holds exactly one data point (size == getNoValues()):
//    replicate that single point into every slot;
//  * otherwise data is assumed to already hold one point per slot and is
//    copied wholesale (the EsysAssert checks it is at least a multiple).
DataExpanded::DataExpanded(const FunctionSpace& what,
                           const DataTypes::ShapeType &shape,
                           const DataTypes::ValueType &data)
  : parent(what,shape)
{
  EsysAssert(data.size()%getNoValues()==0,
             "DataExpanded Constructor - size of supplied data is not a multiple of shape size.");

  if (data.size()==getNoValues())
  {
    // NOTE(review): vec is bound before initialise() resizes the storage;
    // this relies on getData() returning a reference to the container object
    // itself (so the reference survives the resize) — confirm.
    ValueType& vec=m_data.getData();
    //
    // create the view of the data
    initialise(what.getNumSamples(),what.getNumDPPSample());
    // now we copy this value to all elements
    for (int i=0;i<getLength();)
    {
      for (unsigned int j=0;j<getNoValues();++j,++i)
      {
        vec[i]=data[j];
      }
    }
  }
  else
  {
    //
    // copy the data in the correct format
    m_data.getData()=data;
  }


}
172
// Construct with every element set to the scalar v. The fill loop is
// parallel so that (on first-touch NUMA systems) pages are touched by the
// threads that will later work on them.
DataExpanded::DataExpanded(const FunctionSpace& what,
                           const DataTypes::ShapeType &shape,
                           const double v)
  : parent(what,shape)
{
  ValueType& vec=m_data.getData();
  //
  // create the view of the data
  initialise(what.getNumSamples(),what.getNumDPPSample());
  // now we copy this value to all elements
  const int L=getLength();
  int i;
  #pragma omp parallel for schedule(static) private(i)
  for (i=0;i<L;++i)
  {
    vec[i]=v;
  }
}
191
192
193
// Destructor: m_data cleans itself up, nothing else to release.
DataExpanded::~DataExpanded()
{
}
197
// Return a heap-allocated deep copy (caller owns the result).
DataAbstract*
DataExpanded::deepCopy()
{
  return new DataExpanded(*this);
}
203
204
// Return a new DataExpanded containing the given slice of this object
// (caller owns the result); see the slice constructor for the copy logic.
DataAbstract*
DataExpanded::getSlice(const DataTypes::RegionType& region) const
{
  return new DataExpanded(*this,region);
}
210
// Overwrite the given slice region of every data point with the values from
// `value` (which must itself be a DataExpanded of matching slice shape).
void
DataExpanded::setSlice(const DataAbstract* value,
                       const DataTypes::RegionType& region)
{
  const DataExpanded* tempDataExp=dynamic_cast<const DataExpanded*>(value);
  if (tempDataExp==0) {
    throw DataException("Programming error - casting to DataExpanded.");
  }
  CHECK_FOR_EX_WRITE
  //
  // get shape of slice
  DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
  DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
  //
  // check shape
  if (getRank()!=region.size()) {
    throw DataException("Error - Invalid slice region.");
  }
  if (tempDataExp->getRank()>0 && !DataTypes::checkShape(value->getShape(), shape)) {
    throw DataException (DataTypes::createShapeErrorMessage(
                "Error - Couldn't copy slice due to shape mismatch.",shape, value->getShape()));
  }
  //
  // copy the data from the slice into this object
  // (vector/shape lookups are hoisted out of the parallel loop: getVectorRW()
  // performs a sharing check that may throw, which must not happen inside
  // the OpenMP region)
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  int i, j;
  ValueType& vec=getVectorRW();
  const ShapeType& mshape=getShape();
  const ValueType& tVec=tempDataExp->getVectorRO();
  const ShapeType& tShape=tempDataExp->getShape();
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      DataTypes::copySliceFrom(vec,mshape,getPointOffset(i,j),
                               tVec,
                               tShape,
                               tempDataExp->getPointOffset(i,j),
                               region_loop_range);

    }
  }
}
254
255 void
256 DataExpanded::copy(const DataConstant& value)
257 {
258 EsysAssert((checkShape(getShape(), value.getShape())),
259 createShapeErrorMessage("Error - Couldn't copy due to shape mismatch.",value.getShape(),getShape()));
260
261 //
262 // copy a single value to every data point in this object
263 int nRows=m_data.getNumRows();
264 int nCols=m_data.getNumCols();
265 int i,j;
266 #pragma omp parallel for private(i,j) schedule(static)
267 for (i=0;i<nRows;i++) {
268 for (j=0;j<nCols;j++) {
269 DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(), value.getVectorRO(), 0);
270 }
271 }
272 }
273
// Replicate the wrapped array's value into every data point.
// Throws DataException if the array's shape differs from this object's.
void
DataExpanded::copy(const WrappedArray& value)
{
  // check the input shape matches this shape
  if (!DataTypes::checkShape(getShape(),value.getShape())) {
    throw DataException(DataTypes::createShapeErrorMessage(
                        "Error - (DataExpanded) Cannot copy due to shape mismatch.",
                        value.getShape(),getShape()));
  }
  // copy once per data point: numDPPSample * numSamples copies in total
  getVectorRW().copyFromArray(value, getNumDPPSample()*getNumSamples());
}
285
286
// Size the internal data array for the given sample layout.
// A zero sample count leaves the default (empty) storage untouched.
void
DataExpanded::initialise(int noSamples,
                         int noDataPointsPerSample)
{
  if (noSamples==0) //retain the default empty object
  {
    return;
  }
  //
  // resize data array to the required size
  m_data.resize(noSamples,noDataPointsPerSample,getNoValues());
}
299
300 bool
301 DataExpanded::hasNaN() const
302 {
303 const ValueType& v=m_data.getData();
304 for (ValueType::size_type i=0;i<v.size();++i)
305 {
306 if (nancheck(v[i]))
307 {
308 return true;
309 }
310 }
311 return false;
312 }
313
314
315 string
316 DataExpanded::toString() const
317 {
318 stringstream temp;
319 FunctionSpace fs=getFunctionSpace();
320
321 int offset=0;
322 for (int i=0;i<m_data.getNumRows();i++) {
323 for (int j=0;j<m_data.getNumCols();j++) {
324 offset=getPointOffset(i,j);
325 stringstream suffix;
326 suffix << "( id: " << i << ", ref: " << fs.getReferenceIDOfSample(i) << ", pnt: " << j << ")";
327 temp << DataTypes::pointToString(getVectorRO(),getShape(),offset,suffix.str());
328 if (!(i==(m_data.getNumRows()-1) && j==(m_data.getNumCols()-1))) {
329 temp << endl;
330 }
331 }
332 }
333 string result=temp.str();
334 if (result.empty())
335 {
336 return "(data contains no samples)\n";
337 }
338 return temp.str();
339 }
340
// Offset of the given (sample, point) block within the flat data vector.
DataTypes::ValueType::size_type
DataExpanded::getPointOffset(int sampleNo,
                             int dataPointNo) const
{
  return m_data.index(sampleNo,dataPointNo);
}
347
// Non-const overload of the above; same offset computation.
DataTypes::ValueType::size_type
DataExpanded::getPointOffset(int sampleNo,
                             int dataPointNo)
{
  return m_data.index(sampleNo,dataPointNo);
}
354
// Total number of doubles stored (samples * points per sample * values per point).
DataTypes::ValueType::size_type
DataExpanded::getLength() const
{
  return m_data.size();
}
360
361
362
// Set every component of one data point (identified by sample and point
// number) to the scalar `value`. Supports ranks 0 through 4.
// Throws DataException for out-of-range sample/point numbers.
void
DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const double value) {
  CHECK_FOR_EX_WRITE
  //
  // Get the number of samples and data-points per sample.
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int dataPointRank = getRank();
  ShapeType dataPointShape = getShape();
  if (numSamples*numDataPointsPerSample > 0) {
    //TODO: global error handling
    if ((sampleNo >= numSamples) || (sampleNo < 0 )) {

      // diagnostic dump to stderr before throwing (debug aid left in place)
      ostringstream oss;
      oss << "sampleNo=" << sampleNo << " numSamples=" << numSamples << endl;
      cerr << oss.str();


      throw DataException("Error - DataExpanded::copyToDataPoint[scalar] invalid sampleNo.");
    }
    if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
      throw DataException("Error - DataExpanded::copyToDataPoint[scalar] invalid dataPointNoInSample.");
    }
    ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
    ValueType& vec=getVectorRW();
    // fill the point component-by-component; loop nesting depth follows rank
    if (dataPointRank==0) {
      vec[offset]=value;
    } else if (dataPointRank==1) {
      for (int i=0; i<dataPointShape[0]; i++) {
        vec[offset+i]=value;
      }
    } else if (dataPointRank==2) {
      for (int i=0; i<dataPointShape[0]; i++) {
        for (int j=0; j<dataPointShape[1]; j++) {
          vec[offset+getRelIndex(dataPointShape,i,j)]=value;
        }
      }
    } else if (dataPointRank==3) {
      for (int i=0; i<dataPointShape[0]; i++) {
        for (int j=0; j<dataPointShape[1]; j++) {
          for (int k=0; k<dataPointShape[2]; k++) {
            vec[offset+getRelIndex(dataPointShape,i,j,k)]=value;
          }
        }
      }
    } else if (dataPointRank==4) {
      for (int i=0; i<dataPointShape[0]; i++) {
        for (int j=0; j<dataPointShape[1]; j++) {
          for (int k=0; k<dataPointShape[2]; k++) {
            for (int l=0; l<dataPointShape[3]; l++) {
              vec[offset+getRelIndex(dataPointShape,i,j,k,l)]=value;
            }
          }
        }
      }
    }
  }
}
421
// Copy a whole wrapped array into one data point. The array's rank must
// match this object's rank; sample/point numbers are range-checked.
void
DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const WrappedArray& value) {
  CHECK_FOR_EX_WRITE
  //
  // Get the number of samples and data-points per sample.
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  //
  // check rank:
  if (value.getRank()!=getRank())
    throw DataException("Rank of value does not match Data object rank");
  if (numSamples*numDataPointsPerSample > 0) {
    //TODO: global error handling
    if ((sampleNo >= numSamples) || (sampleNo < 0 )) {

      // ostringstream oss;
      // oss << "sampleNo=" << sampleNo << " numSamples=" << numSamples << endl;
      // throw DataException(oss.str());

      throw DataException("Error - DataExpanded::copyToDataPoint[wrapped] invalid sampleNo.");
    }
    if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
      throw DataException("Error - DataExpanded::copyToDataPoint[wrapped] invalid dataPointNoInSample.");
    }
    ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
    ValueType& vec=getVectorRW();
    // copy exactly one data point's worth of values starting at offset
    vec.copyFromArrayToOffset(value,offset,1);
  }
}
451
// Compute the symmetric part of every data point, writing results into ev
// (which must be a DataExpanded of the appropriate shape).
void
DataExpanded::symmetric(DataAbstract* ev)
{
  int sampleNo,dataPointNo;
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
  if (temp_ev==0) {
    throw DataException("Error - DataExpanded::symmetric: casting to DataExpanded failed (propably a programming error).");
  }
  // hoist vector/shape lookups out of the parallel loop
  const ValueType& vec=getVectorRO();
  const ShapeType& shape=getShape();
  ValueType& evVec=temp_ev->getVectorRW();
  const ShapeType& evShape=temp_ev->getShape();
  #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      // NOTE: the offset is taken through ev (virtual call) rather than
      // temp_ev — presumably equivalent here; kept as-is.
      DataMaths::symmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
                           evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
    }
  }
}
474 void
475 DataExpanded::nonsymmetric(DataAbstract* ev)
476 {
477 int sampleNo,dataPointNo;
478 int numSamples = getNumSamples();
479 int numDataPointsPerSample = getNumDPPSample();
480 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
481 if (temp_ev==0) {
482 throw DataException("Error - DataExpanded::nonsymmetric: casting to DataExpanded failed (propably a programming error).");
483 }
484 const ValueType& vec=getVectorRO();
485 const ShapeType& shape=getShape();
486 ValueType& evVec=temp_ev->getVectorRW();
487 const ShapeType& evShape=temp_ev->getShape();
488 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
489 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
490 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
491 DataMaths::nonsymmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
492 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
493 }
494 }
495 }
// Compute the trace (contraction over the axes starting at axis_offset)
// of every data point, writing results into ev.
void
DataExpanded::trace(DataAbstract* ev, int axis_offset)
{
  int sampleNo,dataPointNo;
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
  if (temp_ev==0) {
    throw DataException("Error - DataExpanded::trace: casting to DataExpanded failed (propably a programming error).");
  }
  // hoist vector/shape lookups out of the parallel loop
  const ValueType& vec=getVectorRO();
  const ShapeType& shape=getShape();
  ValueType& evVec=temp_ev->getVectorRW();
  const ShapeType& evShape=temp_ev->getShape();
  #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      DataMaths::trace(vec,shape,getPointOffset(sampleNo,dataPointNo),
                       evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
    }
  }
}
518
519 void
520 DataExpanded::transpose(DataAbstract* ev, int axis_offset)
521 {
522 int sampleNo,dataPointNo;
523 int numSamples = getNumSamples();
524 int numDataPointsPerSample = getNumDPPSample();
525 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
526 if (temp_ev==0) {
527 throw DataException("Error - DataExpanded::transpose: casting to DataExpanded failed (propably a programming error).");
528 }
529 const ValueType& vec=getVectorRO();
530 const ShapeType& shape=getShape();
531 ValueType& evVec=temp_ev->getVectorRW();
532 const ShapeType& evShape=temp_ev->getShape();
533 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
534 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
535 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
536 DataMaths::transpose(vec,shape,getPointOffset(sampleNo,dataPointNo),
537 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
538 }
539 }
540 }
541
// Swap the two given axes of every data point, writing results into ev.
void
DataExpanded::swapaxes(DataAbstract* ev, int axis0, int axis1)
{
  int sampleNo,dataPointNo;
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
  if (temp_ev==0) {
    throw DataException("Error - DataExpanded::swapaxes: casting to DataExpanded failed (propably a programming error).");
  }
  // hoist vector/shape lookups out of the parallel loop
  const ValueType& vec=getVectorRO();
  const ShapeType& shape=getShape();
  ValueType& evVec=temp_ev->getVectorRW();
  const ShapeType& evShape=temp_ev->getShape();
  #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      DataMaths::swapaxes(vec,shape,getPointOffset(sampleNo,dataPointNo),
                          evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis0,axis1);
    }
  }
}
// Compute the eigenvalues of every (square, rank-2) data point, writing
// them into ev.
void
DataExpanded::eigenvalues(DataAbstract* ev)
{
  int sampleNo,dataPointNo;
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
  if (temp_ev==0) {
    throw DataException("Error - DataExpanded::eigenvalues: casting to DataExpanded failed (propably a programming error).");
  }
  // hoist vector/shape lookups out of the parallel loop
  const ValueType& vec=getVectorRO();
  const ShapeType& shape=getShape();
  ValueType& evVec=temp_ev->getVectorRW();
  const ShapeType& evShape=temp_ev->getShape();
  #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      DataMaths::eigenvalues(vec,shape,getPointOffset(sampleNo,dataPointNo),
                             evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
    }
  }
}
// Compute eigenvalues (into ev) and eigenvectors (into V) of every data
// point, with convergence tolerance tol. Both outputs must be DataExpanded.
void
DataExpanded::eigenvalues_and_eigenvectors(DataAbstract* ev,DataAbstract* V,const double tol)
{
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int sampleNo,dataPointNo;
  DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
  if (temp_ev==0) {
    throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (propably a programming error).");
  }
  DataExpanded* temp_V=dynamic_cast<DataExpanded*>(V);
  if (temp_V==0) {
    throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (propably a programming error).");
  }
  // hoist vector/shape lookups for all three participants out of the loop
  const ValueType& vec=getVectorRO();
  const ShapeType& shape=getShape();
  ValueType& evVec=temp_ev->getVectorRW();
  const ShapeType& evShape=temp_ev->getShape();
  ValueType& VVec=temp_V->getVectorRW();
  const ShapeType& VShape=temp_V->getShape();
  #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      DataMaths::eigenvalues_and_eigenvectors(vec,shape,getPointOffset(sampleNo,dataPointNo),
                                              evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),
                                              VVec, VShape,V->getPointOffset(sampleNo,dataPointNo),tol);
    }
  }
}
615
616
617 int
618 DataExpanded::matrixInverse(DataAbstract* out) const
619 {
620 DataExpanded* temp=dynamic_cast<DataExpanded*>(out);
621 if (temp==0)
622 {
623 throw DataException("Error - DataExpanded::matrixInverse: casting to DataExpanded failed (propably a programming error).");
624 }
625
626 if (getRank()!=2)
627 {
628 throw DataException("Error - DataExpanded::matrixInverse: input must be rank 2.");
629
630 }
631 int sampleNo;
632 const int numdpps=getNumDPPSample();
633 const int numSamples = getNumSamples();
634 const ValueType& vec=m_data.getData();
635 int errcode=0;
636 #pragma omp parallel private(sampleNo)
637 {
638 int errorcode=0;
639 LapackInverseHelper h(getShape()[0]);
640 #pragma omp for schedule(static)
641 for (sampleNo = 0; sampleNo < numSamples; sampleNo++)
642 {
643 // not sure I like all those virtual calls to getPointOffset
644 DataTypes::ValueType::size_type offset=getPointOffset(sampleNo,0);
645 int res=DataMaths::matrix_inverse(vec, getShape(), offset, temp->getVectorRW(), temp->getShape(), offset, numdpps, h);
646 if (res>errorcode)
647 {
648 errorcode=res;
649 #pragma omp critical
650 {
651 errcode=errorcode; // I'm not especially concerned which error gets reported as long as one is
652 }
653 }
654 }
655 }
656 return errcode;
657 if (errcode)
658 {
659 DataMaths::matrixInverseError(errcode); // throws exceptions
660 }
661 }
662
// Zero out every data point.
void
DataExpanded::setToZero(){
  // TODO: Surely there is a more efficient way to do this????
  // Why is there no memset here? Parallel issues?
  // A: This ensures that memory is touched by the correct thread
  //    (first-touch placement on NUMA systems).
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataTypes::ValueType::size_type n = getNoValues();
  double* p;
  int sampleNo,dataPointNo, i;
  #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      // p points at the start of this data point's block of n doubles
      p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
      for (i=0; i<n ;++i) p[i]=0.;
    }
  }
}
682
/* Append MPI rank to file name if multiple MPI processes.
 * Returns a malloc'd string (".%04d" suffix added only when mpi_size>1);
 * the caller owns the buffer and must free() it.
 * Returns NULL if allocation fails (the original dereferenced the
 * unchecked malloc result). */
char *Escript_MPI_appendRankToFileName(const char *fileName, int mpi_size, int mpi_rank) {
  /* Make plenty of room for the mpi_rank number and terminating '\0' */
  size_t len = strlen(fileName);
  char *newFileName = (char *)malloc(len+20);
  if (newFileName == NULL) return NULL;
  strncpy(newFileName, fileName, len+1);
  if (mpi_size>1) sprintf(newFileName+len, ".%04d", mpi_rank);
  return(newFileName);
}
691
692 void
693 DataExpanded::dump(const std::string fileName) const
694 {
695 #ifdef USE_NETCDF
696 const int ldims=2+DataTypes::maxRank;
697 const NcDim* ncdims[ldims];
698 NcVar *var, *ids;
699 int rank = getRank();
700 int type= getFunctionSpace().getTypeCode();
701 int ndims =0;
702 long dims[ldims];
703 const double* d_ptr=&(m_data[0]);
704 const DataTypes::ShapeType& shape = getShape();
705 int mpi_iam=getFunctionSpace().getDomain()->getMPIRank();
706 int mpi_num=getFunctionSpace().getDomain()->getMPISize();
707 #ifdef ESYS_MPI
708 MPI_Status status;
709 #endif
710
711 #ifdef ESYS_MPI
712 /* Serialize NetCDF I/O */
713 if (mpi_iam>0) MPI_Recv(&ndims, 0, MPI_INT, mpi_iam-1, 81801, MPI_COMM_WORLD, &status);
714 #endif
715
716 // netCDF error handler
717 NcError err(NcError::verbose_nonfatal);
718 // Create the file.
719 char *newFileName = Escript_MPI_appendRankToFileName(fileName.c_str(), mpi_num, mpi_iam);
720 NcFile dataFile(newFileName, NcFile::Replace);
721 // check if writing was successful
722 if (!dataFile.is_valid())
723 throw DataException("Error - DataExpanded:: opening of netCDF file for output failed.");
724 if (!dataFile.add_att("type_id",2) )
725 throw DataException("Error - DataExpanded:: appending data type to netCDF file failed.");
726 if (!dataFile.add_att("rank",rank) )
727 throw DataException("Error - DataExpanded:: appending rank attribute to netCDF file failed.");
728 if (!dataFile.add_att("function_space_type",type))
729 throw DataException("Error - DataExpanded:: appending function space attribute to netCDF file failed.");
730 ndims=rank+2;
731 if ( rank >0 ) {
732 dims[0]=shape[0];
733 if (! (ncdims[0] = dataFile.add_dim("d0",shape[0])) )
734 throw DataException("Error - DataExpanded:: appending ncdimension 0 to netCDF file failed.");
735 }
736 if ( rank >1 ) {
737 dims[1]=shape[1];
738 if (! (ncdims[1] = dataFile.add_dim("d1",shape[1])) )
739 throw DataException("Error - DataExpanded:: appending ncdimension 1 to netCDF file failed.");
740 }
741 if ( rank >2 ) {
742 dims[2]=shape[2];
743 if (! (ncdims[2] = dataFile.add_dim("d2", shape[2])) )
744 throw DataException("Error - DataExpanded:: appending ncdimension 2 to netCDF file failed.");
745 }
746 if ( rank >3 ) {
747 dims[3]=shape[3];
748 if (! (ncdims[3] = dataFile.add_dim("d3", shape[3])) )
749 throw DataException("Error - DataExpanded:: appending ncdimension 3 to netCDF file failed.");
750 }
751 dims[rank]=getFunctionSpace().getNumDataPointsPerSample();
752 if (! (ncdims[rank] = dataFile.add_dim("num_data_points_per_sample", dims[rank])) )
753 throw DataException("Error - DataExpanded:: appending num_data_points_per_sample to netCDF file failed.");
754 dims[rank+1]=getFunctionSpace().getNumSamples();
755 if (! (ncdims[rank+1] = dataFile.add_dim("num_samples", dims[rank+1])) )
756 throw DataException("Error - DataExpanded:: appending num_sample to netCDF file failed.");
757
758 if (getFunctionSpace().getNumSamples()>0)
759 {
760
761 if (! ( ids = dataFile.add_var("id", ncInt, ncdims[rank+1])) )
762 throw DataException("Error - DataExpanded:: appending reference id to netCDF file failed.");
763 const int* ids_p=getFunctionSpace().borrowSampleReferenceIDs();
764 if (! (ids->put(ids_p,dims[rank+1])) )
765 throw DataException("Error - DataExpanded:: copy reference id to netCDF buffer failed.");
766 if (! ( var = dataFile.add_var("data", ncDouble, ndims, ncdims)) )
767 throw DataException("Error - DataExpanded:: appending variable to netCDF file failed.");
768 if (! (var->put(d_ptr,dims)) )
769 throw DataException("Error - DataExpanded:: copy data to netCDF buffer failed.");
770 }
771 #ifdef ESYS_MPI
772 if (mpi_iam<mpi_num-1) MPI_Send(&ndims, 0, MPI_INT, mpi_iam+1, 81801, MPI_COMM_WORLD);
773 #endif
774 #else
775 throw DataException("Error - DataExpanded:: dump is not configured with netCDF. Please contact your installation manager.");
776 #endif
777 }
778
// Overwrite every data point belonging to samples carrying tagKey with the
// supplied value (read from `value` starting at dataOffset).
// The pointshape parameter is unused here; shape agreement is checked via
// the value-size comparison below.
void
DataExpanded::setTaggedValue(int tagKey,
                             const DataTypes::ShapeType& pointshape,
                             const DataTypes::ValueType& value,
                             int dataOffset)
{
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int sampleNo,dataPointNo, i;
  DataTypes::ValueType::size_type n = getNoValues();
  double* p;
  const double* in=&value[0+dataOffset];

  if (value.size() != n) {
    throw DataException("Error - DataExpanded::setTaggedValue: number of input values does not match number of values per data points.");
  }

  #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    // only samples whose tag matches are overwritten
    if (getFunctionSpace().getTagFromSampleNo(sampleNo) == tagKey ) {
      for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
        p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
        for (i=0; i<n ;++i) p[i]=in[i];
      }
    }
  }
}
807
808
809 void
810 DataExpanded::reorderByReferenceIDs(int *reference_ids)
811 {
812 CHECK_FOR_EX_WRITE
813 int numSamples = getNumSamples();
814 DataTypes::ValueType::size_type n = getNoValues() * getNumDPPSample();
815 int sampleNo, sampleNo2,i;
816 double* p,*p2;
817 register double rtmp;
818 FunctionSpace fs=getFunctionSpace();
819
820 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
821 const int id_in=reference_ids[sampleNo];
822 const int id=fs.getReferenceIDOfSample(sampleNo);
823 if (id!=id_in) {
824 bool matched=false;
825 for (sampleNo2 = sampleNo+1; sampleNo2 < numSamples; sampleNo2++) {
826 if (id == reference_ids[sampleNo2]) {
827 p=&(m_data[getPointOffset(sampleNo,0)]);
828 p2=&(m_data[getPointOffset(sampleNo2,0)]);
829 for (i=0; i<n ;i++) {
830 rtmp=p[i];
831 p[i]=p2[i];
832 p2[i]=rtmp;
833 }
834 reference_ids[sampleNo]=id;
835 reference_ids[sampleNo2]=id_in;
836 matched=true;
837 break;
838 }
839 }
840 if (! matched) {
841 throw DataException("Error - DataExpanded::reorderByReferenceIDs: unable to reorder sample data by reference ids");
842 }
843 }
844 }
845 }
846
// Mutable access to the underlying vector; refuses (throws) if the storage
// is shared with another Data object.
DataTypes::ValueType&
DataExpanded::getVectorRW()
{
  CHECK_FOR_EX_WRITE
  return m_data.getData();
}
853
// Read-only access to the underlying vector; no sharing check needed.
const DataTypes::ValueType&
DataExpanded::getVectorRO() const
{
  return m_data.getData();
}
859
860
#ifndef MKLRANDOM
namespace {

// file-local RNG state for the boost fallback:
// `base` seeds the per-thread generators in `gens` (one per OpenMP thread).
boost::mt19937 base;          // used to seed all the other generators
vector<boost::mt19937> gens;


// (Re)seed the per-thread generators. A seed of 0 means "leave the current
// generator state alone" (callers are expected to pass a nonzero seed on
// first use — see randomFill).
void seedGens(long seed)
{
  #ifdef _OPENMP
  int numthreads=omp_get_max_threads();
  #else
  int numthreads=1;
  #endif
  if (gens.size()==0)       // we haven't instantiated the generators yet
  {
    gens.resize(numthreads);  // yes this means all the generators will be owned by one thread in a NUMA sense
  }                           // I don't think it is worth a more complicated solution at this point
  if (seed!=0)
  {
    base.seed((uint32_t)seed);  // without this cast, icc gets confused
    for (int i=0;i<numthreads;++i)
    {
      uint32_t b=base();
      gens[i].seed(b);          // initialise each generator with successive random values
    }
  }
}


}
#endif
893
894 // Idea here is to create an array of seeds by feeding the original seed into the random generator
895 // The code at the beginning of the function to compute the seed if one is given is
896 // just supposed to introduce some variety (and ensure that multiple ranks don't get the same seed).
897 // I make no claim about how well these initial seeds are distributed
898 void DataExpanded::randomFill(long seed)
899 {
900 CHECK_FOR_EX_WRITE
901 static unsigned prevseed=0; // So if we create a bunch of objects we don't get the same start seed
902 if (seed==0) // for each one
903 {
904 if (prevseed==0)
905 {
906 time_t s=time(0);
907 seed=s;
908 }
909 else
910 {
911 seed=prevseed+419; // these numbers are arbitrary
912 if (seed>3040101) // I want to avoid overflow on 32bit systems
913 {
914 seed=((int)(seed)%0xABCD)+1;
915 }
916 }
917 }
918 // now we need to consider MPI since we don't want each rank to start with the same seed
919 seed+=getFunctionSpace().getDomain()->getMPIRank()*getFunctionSpace().getDomain()->getMPISize()*3;
920 prevseed=seed;
921
922 #ifdef MKLRANDOM
923
924 #ifdef _OPENMP
925 int numthreads=omp_get_max_threads();
926 #else
927 int numthreads=1;
928 #endif
929 double* seeds=new double[numthreads];
930 VSLStreamStatePtr sstream;
931
932 int status=vslNewStream(&sstream, VSL_BRNG_MT19937, seed); // use a Mersenne Twister
933 numeric_limits<double> dlim;
934 vdRngUniform(VSL_METHOD_DUNIFORM_STD, sstream , numthreads, seeds, -1, 1);
935 vslDeleteStream(&sstream);
936 DataVector& dv=getVectorRW();
937 size_t dvsize=dv.size();
938 #pragma omp parallel
939 {
940 int tnum=0;
941 #ifdef _OPENMP
942 tnum=omp_get_thread_num();
943 #endif
944 VSLStreamStatePtr stream;
945 // the 12345 is a hack to give us a better chance of getting different integer seeds.
946 int status=vslNewStream(&stream, VSL_BRNG_MT19937, seeds[tnum]*12345); // use a Mersenne Twister
947 int bigchunk=(dvsize/numthreads+1);
948 int smallchunk=dvsize-bigchunk*(numthreads-1);
949 int chunksize=(tnum<(numthreads-1))?bigchunk:smallchunk;
950 vdRngUniform(VSL_METHOD_DUNIFORM_STD, stream, chunksize, &(dv[bigchunk*tnum]), 0,1);
951 vslDeleteStream(&stream);
952 }
953 delete[] seeds;
954 #else
955 boost::mt19937::result_type RMAX=base.max();
956 seedGens(seed);
957 DataVector& dv=getVectorRW();
958 long i;
959 const size_t dvsize=dv.size();
960
961 #pragma omp parallel private(i)
962 {
963 int tnum=0;
964 #ifdef _OPENMP
965 tnum=omp_get_thread_num();
966 #endif
967 boost::mt19937& generator=gens[tnum];
968
969 #pragma omp for schedule(static)
970 for (i=0;i<dvsize;++i)
971 {
972
973
974
975
976
977 #ifdef _WIN32
978 dv[i]=((double)generator())/RMAX;
979 #else
980 dv[i]=((double)generator())/RMAX;
981 #endif
982 }
983 }
984 #endif
985 }
986
987 } // end of namespace

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26