/[escript]/trunk/escript/src/DataExpanded.cpp
ViewVC logotype

Contents of /trunk/escript/src/DataExpanded.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3550 - (show annotations)
Fri Aug 19 01:51:35 2011 UTC (8 years, 1 month ago) by jfenwick
File size: 32387 byte(s)
Switching from rand() to boost::random for non-VSL randoms

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2010 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16 #include "DataExpanded.h"
17 #include "DataException.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include <limits>
21
22 #include "esysUtils/Esys_MPI.h"
23
24 #ifdef USE_NETCDF
25 #include <netcdfcpp.h>
26 #endif
27
28 #include <boost/python/extract.hpp>
29 #include "DataMaths.h"
30
31 //#define MKLRANDOM
32
33 #ifdef MKLRANDOM
34 #include <mkl_vsl.h>
35 #else
36 #include <boost/random/mersenne_twister.hpp>
37 #endif
38
39 using namespace std;
40 using namespace boost::python;
41 using namespace boost;
42 using namespace escript::DataTypes;
43
44
// Guard for all mutating operations: verify that this object's data is not
// shared with another Data object before writing.  Throws a DataException
// identifying the offending source line.
// FIX: the previous version contained *((int*)0)=17; which deliberately
// segfaulted before the throw could ever run (debug residue) -- removed.
#define CHECK_FOR_EX_WRITE if (!checkNoSharing()) {std::ostringstream ss; ss << " Attempt to modify shared object. line " << __LINE__ << " of " << __FILE__; throw DataException(ss.str());}
48
49 namespace escript {
50
// Construct an expanded Data object over function space 'what', with every
// data point initialised to the supplied array value.
DataExpanded::DataExpanded(const WrappedArray& value,
                           const FunctionSpace& what)
  : parent(what,value.getShape())
{
  //
  // initialise the data array for this object
  initialise(what.getNumSamples(),what.getNumDPPSample());
  //
  // copy the given value to every data point
  copy(value);
}
62
// Copy constructor: function space and shape come from the parent
// initialiser; the whole underlying data array is copied.
DataExpanded::DataExpanded(const DataExpanded& other)
  : parent(other.getFunctionSpace(), other.getShape()),
  m_data(other.m_data)
{
}
68
// Promote a DataConstant to expanded storage: the single constant value is
// replicated into every data point.
DataExpanded::DataExpanded(const DataConstant& other)
  : parent(other.getFunctionSpace(), other.getShape())
{
  //
  // initialise the data array for this object
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // DataConstant only has one value, copy this to every data point
  copy(other);
}
79
// Promote a DataTagged object to expanded storage: each point's value
// (default or tag-specific, resolved by DataTagged::getPointOffset) is
// copied into this object's per-point storage.
DataExpanded::DataExpanded(const DataTagged& other)
  : parent(other.getFunctionSpace(), other.getShape())
{
  //
  // initialise the data array for this object
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // for each data point in this object, extract and copy the corresponding data
  // value from the given DataTagged object
  int i,j;
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      try {
        DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(),
                             other.getVectorRO(),
                             other.getPointOffset(i,j));
      }
      catch (std::exception& e) {
        // NOTE(review): exceptions here are printed and swallowed (throwing
        // out of an OpenMP parallel region is not allowed); the constructed
        // object may then contain uninitialised points -- confirm intended.
        cout << e.what() << endl;
      }
    }
  }
}
106
// Slice constructor: builds a new expanded object whose shape is the result
// of applying 'region' to 'other', and copies the sliced values point by
// point.
DataExpanded::DataExpanded(const DataExpanded& other,
                           const DataTypes::RegionType& region)
  : parent(other.getFunctionSpace(),DataTypes::getResultSliceShape(region))
{
  //
  // get the shape of the slice
  //   DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
  //
  // initialise this Data object to the shape of the slice
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // copy the data
  DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  int i,j;
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      try {
        DataTypes::copySlice(getVectorRW(),getShape(),getPointOffset(i,j),
                             other.getVectorRO(),
                             other.getShape(),
                             other.getPointOffset(i,j),
                             region_loop_range);
      }
      catch (std::exception& e) {
        // NOTE(review): errors are printed and swallowed because we cannot
        // throw across the OpenMP region boundary -- confirm intended.
        cout << e.what() << endl;
      }
    }
  }
}
139
// Construct from a raw value vector.  Two cases:
//  - data holds exactly one data point's worth of values: replicate it into
//    every point of the function space;
//  - otherwise: take data verbatim as the full expanded payload.
DataExpanded::DataExpanded(const FunctionSpace& what,
                           const DataTypes::ShapeType &shape,
                           const DataTypes::ValueType &data)
  : parent(what,shape)
{
  EsysAssert(data.size()%getNoValues()==0,
                 "DataExpanded Constructor - size of supplied data is not a multiple of shape size.");

  if (data.size()==getNoValues())
  {
     ValueType& vec=m_data.getData();
     //
     // create the view of the data
     initialise(what.getNumSamples(),what.getNumDPPSample());
     // now we copy this value to all elements
     for (int i=0;i<getLength();)
     {
        for (unsigned int j=0;j<getNoValues();++j,++i)
        {
            vec[i]=data[j];
        }
     }
  }
  else
  {
     //
     // copy the data in the correct format
     // NOTE(review): this branch assigns the raw vector without calling
     // initialise(); presumably m_data's row/column bookkeeping is set up by
     // the assignment or elsewhere -- confirm.
     m_data.getData()=data;
  }


}
172
173 DataExpanded::DataExpanded(const FunctionSpace& what,
174 const DataTypes::ShapeType &shape,
175 const double v)
176 : parent(what,shape)
177 {
178 ValueType& vec=m_data.getData();
179 //
180 // create the view of the data
181 initialise(what.getNumSamples(),what.getNumDPPSample());
182 // now we copy this value to all elements
183 const int L=getLength();
184 int i;
185 #pragma omp parallel for schedule(static) private(i)
186 for (i=0;i<L;++i)
187 {
188 vec[i]=v;
189 }
190 }
191
192
193
// Destructor: m_data cleans itself up; nothing extra to release.
DataExpanded::~DataExpanded()
{
}
197
// Return a new, independently-owned copy of this object.
// Caller takes ownership of the returned pointer.
DataAbstract*
DataExpanded::deepCopy()
{
  return new DataExpanded(*this);
}
203
204
// Return a new DataExpanded holding the given slice of this object.
// Caller takes ownership of the returned pointer.
DataAbstract*
DataExpanded::getSlice(const DataTypes::RegionType& region) const
{
  return new DataExpanded(*this,region);
}
210
// Copy 'value' into the given slice region of every data point of this
// object.  'value' must be a DataExpanded whose shape matches the slice
// (scalar sources are exempt from the shape check).
// Throws DataException on a bad cast, bad region, or shape mismatch.
void
DataExpanded::setSlice(const DataAbstract* value,
                       const DataTypes::RegionType& region)
{
  const DataExpanded* tempDataExp=dynamic_cast<const DataExpanded*>(value);
  if (tempDataExp==0) {
    throw DataException("Programming error - casting to DataExpanded.");
  }
  CHECK_FOR_EX_WRITE
  //
  // get shape of slice
  DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
  DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
  //
  // check shape
  if (getRank()!=region.size()) {
    throw DataException("Error - Invalid slice region.");
  }
  // rank-0 sources skip the shape comparison
  if (tempDataExp->getRank()>0 && !DataTypes::checkShape(value->getShape(), shape)) {
    throw DataException (DataTypes::createShapeErrorMessage(
		"Error - Couldn't copy slice due to shape mismatch.",shape, value->getShape()));
  }
  //
  // copy the data from the slice into this object
  // (vector/shape lookups hoisted out of the parallel loop)
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  int i, j;
  ValueType& vec=getVectorRW();
  const ShapeType& mshape=getShape();
  const ValueType& tVec=tempDataExp->getVectorRO();
  const ShapeType& tShape=tempDataExp->getShape();
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      DataTypes::copySliceFrom(vec,mshape,getPointOffset(i,j),
                               tVec,
                               tShape,
                               tempDataExp->getPointOffset(i,j),
                               region_loop_range);

    }
  }
}
254
255 void
256 DataExpanded::copy(const DataConstant& value)
257 {
258 EsysAssert((checkShape(getShape(), value.getShape())),
259 createShapeErrorMessage("Error - Couldn't copy due to shape mismatch.",value.getShape(),getShape()));
260
261 //
262 // copy a single value to every data point in this object
263 int nRows=m_data.getNumRows();
264 int nCols=m_data.getNumCols();
265 int i,j;
266 #pragma omp parallel for private(i,j) schedule(static)
267 for (i=0;i<nRows;i++) {
268 for (j=0;j<nCols;j++) {
269 DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(), value.getVectorRO(), 0);
270 }
271 }
272 }
273
// Copy the given array value into every data point of this object.
// Throws DataException if the array's shape differs from this object's.
void
DataExpanded::copy(const WrappedArray& value)
{
  // check the input shape matches this shape
  if (!DataTypes::checkShape(getShape(),value.getShape())) {
    throw DataException(DataTypes::createShapeErrorMessage(
                        "Error - (DataExpanded) Cannot copy due to shape mismatch.",
                        value.getShape(),getShape()));
  }
  // replicate the array across all points in one bulk call
  getVectorRW().copyFromArray(value, getNumDPPSample()*getNumSamples());
}
285
286
// Size the underlying 2D data block for the given number of samples and
// data points per sample; each cell holds getNoValues() doubles.
// A zero sample count leaves the default empty object untouched.
void
DataExpanded::initialise(int noSamples,
                         int noDataPointsPerSample)
{
  if (noSamples==0)		//retain the default empty object
  {
     return;
  }
  //
  // resize data array to the required size
  m_data.resize(noSamples,noDataPointsPerSample,getNoValues());
}
299
300 bool
301 DataExpanded::hasNaN() const
302 {
303 const ValueType& v=m_data.getData();
304 for (ValueType::size_type i=0;i<v.size();++i)
305 {
306 if (nancheck(v[i]))
307 {
308 return true;
309 }
310 }
311 return false;
312 }
313
314
315 string
316 DataExpanded::toString() const
317 {
318 stringstream temp;
319 FunctionSpace fs=getFunctionSpace();
320
321 int offset=0;
322 for (int i=0;i<m_data.getNumRows();i++) {
323 for (int j=0;j<m_data.getNumCols();j++) {
324 offset=getPointOffset(i,j);
325 stringstream suffix;
326 suffix << "( id: " << i << ", ref: " << fs.getReferenceIDOfSample(i) << ", pnt: " << j << ")";
327 temp << DataTypes::pointToString(getVectorRO(),getShape(),offset,suffix.str());
328 if (!(i==(m_data.getNumRows()-1) && j==(m_data.getNumCols()-1))) {
329 temp << endl;
330 }
331 }
332 }
333 string result=temp.str();
334 if (result.empty())
335 {
336 return "(data contains no samples)\n";
337 }
338 return temp.str();
339 }
340
// Offset into the flat data vector of the given sample/point (const form).
DataTypes::ValueType::size_type
DataExpanded::getPointOffset(int sampleNo,
                             int dataPointNo) const
{
  return m_data.index(sampleNo,dataPointNo);
}
347
// Offset into the flat data vector of the given sample/point.
DataTypes::ValueType::size_type
DataExpanded::getPointOffset(int sampleNo,
                             int dataPointNo)
{
  return m_data.index(sampleNo,dataPointNo);
}
354
// Total number of doubles stored (samples * points per sample * values
// per point).
DataTypes::ValueType::size_type
DataExpanded::getLength() const
{
  return m_data.size();
}
360
361
362
// Set every component of one data point (sampleNo, dataPointNo) to the
// scalar 'value'.  The rank dispatch below enumerates all components via
// getRelIndex for ranks 0..4.
// Throws DataException for out-of-range sample/point numbers.
void
DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const double value) {
  CHECK_FOR_EX_WRITE
  //
  // Get the number of samples and data-points per sample.
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int dataPointRank = getRank();
  ShapeType dataPointShape = getShape();
  if (numSamples*numDataPointsPerSample > 0) {
     //TODO: global error handling
     if ((sampleNo >= numSamples) || (sampleNo < 0 )) {
           throw DataException("Error - DataExpanded::copyDataPoint invalid sampleNo.");
     }
     if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
           throw DataException("Error - DataExpanded::copyDataPoint invalid dataPointNoInSample.");
     }
     ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
     ValueType& vec=getVectorRW();
     if (dataPointRank==0) {
         vec[offset]=value;
     } else if (dataPointRank==1) {
        for (int i=0; i<dataPointShape[0]; i++) {
            vec[offset+i]=value;
        }
     } else if (dataPointRank==2) {
        for (int i=0; i<dataPointShape[0]; i++) {
           for (int j=0; j<dataPointShape[1]; j++) {
              vec[offset+getRelIndex(dataPointShape,i,j)]=value;
           }
        }
     } else if (dataPointRank==3) {
        for (int i=0; i<dataPointShape[0]; i++) {
           for (int j=0; j<dataPointShape[1]; j++) {
              for (int k=0; k<dataPointShape[2]; k++) {
                 vec[offset+getRelIndex(dataPointShape,i,j,k)]=value;
              }
           }
        }
     } else if (dataPointRank==4) {
         for (int i=0; i<dataPointShape[0]; i++) {
            for (int j=0; j<dataPointShape[1]; j++) {
              for (int k=0; k<dataPointShape[2]; k++) {
                 for (int l=0; l<dataPointShape[3]; l++) {
                    vec[offset+getRelIndex(dataPointShape,i,j,k,l)]=value;
                 }
              }
            }
        }
     }
  }
}
415
// Copy an array value into one data point (sampleNo, dataPointNo).
// Throws DataException on rank mismatch or out-of-range indices.
void
DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const WrappedArray& value) {
  CHECK_FOR_EX_WRITE
  //
  // Get the number of samples and data-points per sample.
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  //
  // check rank:
  if (value.getRank()!=getRank())
       throw DataException("Rank of value does not match Data object rank");
  if (numSamples*numDataPointsPerSample > 0) {
     //TODO: global error handling
     if ((sampleNo >= numSamples) || (sampleNo < 0 )) {
           throw DataException("Error - DataExpanded::copyDataPoint invalid sampleNo.");
     }
     if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
           throw DataException("Error - DataExpanded::copyDataPoint invalid dataPointNoInSample.");
     }
     ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
     ValueType& vec=getVectorRW();
     // copy exactly one data point's worth of values starting at offset
     vec.copyFromArrayToOffset(value,offset,1);
  }
}
440
441 void
442 DataExpanded::symmetric(DataAbstract* ev)
443 {
444 int sampleNo,dataPointNo;
445 int numSamples = getNumSamples();
446 int numDataPointsPerSample = getNumDPPSample();
447 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
448 if (temp_ev==0) {
449 throw DataException("Error - DataExpanded::symmetric: casting to DataExpanded failed (propably a programming error).");
450 }
451 const ValueType& vec=getVectorRO();
452 const ShapeType& shape=getShape();
453 ValueType& evVec=temp_ev->getVectorRW();
454 const ShapeType& evShape=temp_ev->getShape();
455 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
456 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
457 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
458 DataMaths::symmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
459 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
460 }
461 }
462 }
463 void
464 DataExpanded::nonsymmetric(DataAbstract* ev)
465 {
466 int sampleNo,dataPointNo;
467 int numSamples = getNumSamples();
468 int numDataPointsPerSample = getNumDPPSample();
469 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
470 if (temp_ev==0) {
471 throw DataException("Error - DataExpanded::nonsymmetric: casting to DataExpanded failed (propably a programming error).");
472 }
473 const ValueType& vec=getVectorRO();
474 const ShapeType& shape=getShape();
475 ValueType& evVec=temp_ev->getVectorRW();
476 const ShapeType& evShape=temp_ev->getShape();
477 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
478 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
479 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
480 DataMaths::nonsymmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
481 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
482 }
483 }
484 }
485 void
486 DataExpanded::trace(DataAbstract* ev, int axis_offset)
487 {
488 int sampleNo,dataPointNo;
489 int numSamples = getNumSamples();
490 int numDataPointsPerSample = getNumDPPSample();
491 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
492 if (temp_ev==0) {
493 throw DataException("Error - DataExpanded::trace: casting to DataExpanded failed (propably a programming error).");
494 }
495 const ValueType& vec=getVectorRO();
496 const ShapeType& shape=getShape();
497 ValueType& evVec=temp_ev->getVectorRW();
498 const ShapeType& evShape=temp_ev->getShape();
499 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
500 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
501 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
502 DataMaths::trace(vec,shape,getPointOffset(sampleNo,dataPointNo),
503 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
504 }
505 }
506 }
507
508 void
509 DataExpanded::transpose(DataAbstract* ev, int axis_offset)
510 {
511 int sampleNo,dataPointNo;
512 int numSamples = getNumSamples();
513 int numDataPointsPerSample = getNumDPPSample();
514 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
515 if (temp_ev==0) {
516 throw DataException("Error - DataExpanded::transpose: casting to DataExpanded failed (propably a programming error).");
517 }
518 const ValueType& vec=getVectorRO();
519 const ShapeType& shape=getShape();
520 ValueType& evVec=temp_ev->getVectorRW();
521 const ShapeType& evShape=temp_ev->getShape();
522 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
523 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
524 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
525 DataMaths::transpose(vec,shape,getPointOffset(sampleNo,dataPointNo),
526 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
527 }
528 }
529 }
530
531 void
532 DataExpanded::swapaxes(DataAbstract* ev, int axis0, int axis1)
533 {
534 int sampleNo,dataPointNo;
535 int numSamples = getNumSamples();
536 int numDataPointsPerSample = getNumDPPSample();
537 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
538 if (temp_ev==0) {
539 throw DataException("Error - DataExpanded::swapaxes: casting to DataExpanded failed (propably a programming error).");
540 }
541 const ValueType& vec=getVectorRO();
542 const ShapeType& shape=getShape();
543 ValueType& evVec=temp_ev->getVectorRW();
544 const ShapeType& evShape=temp_ev->getShape();
545 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
546 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
547 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
548 DataMaths::swapaxes(vec,shape,getPointOffset(sampleNo,dataPointNo),
549 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis0,axis1);
550 }
551 }
552 }
553 void
554 DataExpanded::eigenvalues(DataAbstract* ev)
555 {
556 int sampleNo,dataPointNo;
557 int numSamples = getNumSamples();
558 int numDataPointsPerSample = getNumDPPSample();
559 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
560 if (temp_ev==0) {
561 throw DataException("Error - DataExpanded::eigenvalues: casting to DataExpanded failed (propably a programming error).");
562 }
563 const ValueType& vec=getVectorRO();
564 const ShapeType& shape=getShape();
565 ValueType& evVec=temp_ev->getVectorRW();
566 const ShapeType& evShape=temp_ev->getShape();
567 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
568 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
569 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
570 DataMaths::eigenvalues(vec,shape,getPointOffset(sampleNo,dataPointNo),
571 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
572 }
573 }
574 }
// Compute eigenvalues (into 'ev') and eigenvectors (into 'V') of every data
// point of this object, using tolerance 'tol'.
// Both outputs must be DataExpanded objects of the appropriate shapes.
void
DataExpanded::eigenvalues_and_eigenvectors(DataAbstract* ev,DataAbstract* V,const double tol)
{
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int sampleNo,dataPointNo;
  DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
  if (temp_ev==0) {
    throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (propably a programming error).");
  }
  DataExpanded* temp_V=dynamic_cast<DataExpanded*>(V);
  if (temp_V==0) {
    throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (propably a programming error).");
  }
  // hoist vector/shape lookups out of the parallel loop
  const ValueType& vec=getVectorRO();
  const ShapeType& shape=getShape();
  ValueType& evVec=temp_ev->getVectorRW();
  const ShapeType& evShape=temp_ev->getShape();
  ValueType& VVec=temp_V->getVectorRW();
  const ShapeType& VShape=temp_V->getShape();
  #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      DataMaths::eigenvalues_and_eigenvectors(vec,shape,getPointOffset(sampleNo,dataPointNo),
                                              evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),
                                              VVec, VShape,V->getPointOffset(sampleNo,dataPointNo),tol);
    }
  }
}
604
605
606 int
607 DataExpanded::matrixInverse(DataAbstract* out) const
608 {
609 DataExpanded* temp=dynamic_cast<DataExpanded*>(out);
610 if (temp==0)
611 {
612 throw DataException("Error - DataExpanded::matrixInverse: casting to DataExpanded failed (propably a programming error).");
613 }
614
615 if (getRank()!=2)
616 {
617 throw DataException("Error - DataExpanded::matrixInverse: input must be rank 2.");
618
619 }
620 int sampleNo;
621 const int numdpps=getNumDPPSample();
622 const int numSamples = getNumSamples();
623 const ValueType& vec=m_data.getData();
624 int errcode=0;
625 #pragma omp parallel private(sampleNo)
626 {
627 int errorcode=0;
628 LapackInverseHelper h(getShape()[0]);
629 #pragma omp for schedule(static)
630 for (sampleNo = 0; sampleNo < numSamples; sampleNo++)
631 {
632 // not sure I like all those virtual calls to getPointOffset
633 DataTypes::ValueType::size_type offset=getPointOffset(sampleNo,0);
634 int res=DataMaths::matrix_inverse(vec, getShape(), offset, temp->getVectorRW(), temp->getShape(), offset, numdpps, h);
635 if (res>errorcode)
636 {
637 errorcode=res;
638 #pragma omp critical
639 {
640 errcode=errorcode; // I'm not especially concerned which error gets reported as long as one is
641 }
642 }
643 }
644 }
645 return errcode;
646 if (errcode)
647 {
648 DataMaths::matrixInverseError(errcode); // throws exceptions
649 }
650 }
651
// Zero every value in the object.
void
DataExpanded::setToZero(){
// TODO: Surely there is a more efficient way to do this????
// Why is there no memset here? Parallel issues?
// A: This ensures that memory is touched by the correct thread
//    (first-touch NUMA placement under OpenMP).
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataTypes::ValueType::size_type n = getNoValues();
  double* p;
  int  sampleNo,dataPointNo, i;
  #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
        p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
        for (i=0; i<n ;++i) p[i]=0.;
    }
  }
}
671
/* Append MPI rank to file name if multiple MPI processes.
 * Returns a malloc()'d string the caller must free(), or NULL if the
 * allocation fails (the original dereferenced the malloc result unchecked).
 */
char *Escript_MPI_appendRankToFileName(const char *fileName, int mpi_size, int mpi_rank) {
  /* Make plenty of room for the mpi_rank number and terminating '\0' */
  const size_t len = strlen(fileName);
  char *newFileName = (char *)malloc(len+20);
  if (newFileName == NULL) return NULL;
  memcpy(newFileName, fileName, len+1);
  /* bounded write (was sprintf) */
  if (mpi_size>1) snprintf(newFileName+len, 20, ".%04d", mpi_rank);
  return(newFileName);
}
680
// Write this object to a netCDF file.  Under MPI, each rank writes its own
// file (rank number appended to the name), and I/O is serialized rank by
// rank via a token passed with MPI_Send/MPI_Recv.
// Throws DataException on any netCDF failure, or unconditionally when the
// build has no netCDF support.
void
DataExpanded::dump(const std::string fileName) const
{
   #ifdef USE_NETCDF
   const int ldims=2+DataTypes::maxRank;   // shape dims + dpps + samples
   const NcDim* ncdims[ldims];
   NcVar *var, *ids;
   int rank = getRank();
   int type=  getFunctionSpace().getTypeCode();
   int ndims =0;
   long dims[ldims];
   const double* d_ptr=&(m_data[0]);
   const DataTypes::ShapeType& shape = getShape();
   int mpi_iam=getFunctionSpace().getDomain()->getMPIRank();
   int mpi_num=getFunctionSpace().getDomain()->getMPISize();
#ifdef ESYS_MPI
   MPI_Status status;
#endif

#ifdef ESYS_MPI
   /* Serialize NetCDF I/O: wait for the token from the previous rank */
   if (mpi_iam>0) MPI_Recv(&ndims, 0, MPI_INT, mpi_iam-1, 81801, MPI_COMM_WORLD, &status);
#endif

   // netCDF error handler
   NcError err(NcError::verbose_nonfatal);
   // Create the file.
   char *newFileName = Escript_MPI_appendRankToFileName(fileName.c_str(), mpi_num, mpi_iam);
   NcFile dataFile(newFileName, NcFile::Replace);
   // check if writing was successful
   if (!dataFile.is_valid())
        throw DataException("Error - DataExpanded:: opening of netCDF file for output failed.");
   if (!dataFile.add_att("type_id",2) )
        throw DataException("Error - DataExpanded:: appending data type to netCDF file failed.");
   if (!dataFile.add_att("rank",rank) )
        throw DataException("Error - DataExpanded:: appending rank attribute to netCDF file failed.");
   if (!dataFile.add_att("function_space_type",type))
        throw DataException("Error - DataExpanded:: appending function space attribute to netCDF file failed.");
   ndims=rank+2;   // one dimension per shape axis, plus dpps and samples
   if ( rank >0 ) {
       dims[0]=shape[0];
       if (! (ncdims[0] = dataFile.add_dim("d0",shape[0])) )
            throw DataException("Error - DataExpanded:: appending ncdimension 0 to netCDF file failed.");
   }
   if ( rank >1 ) {
       dims[1]=shape[1];
       if (! (ncdims[1] = dataFile.add_dim("d1",shape[1])) )
            throw DataException("Error - DataExpanded:: appending ncdimension 1 to netCDF file failed.");
   }
   if ( rank >2 ) {
       dims[2]=shape[2];
       if (! (ncdims[2] = dataFile.add_dim("d2", shape[2])) )
            throw DataException("Error - DataExpanded:: appending ncdimension 2 to netCDF file failed.");
   }
   if ( rank >3 ) {
       dims[3]=shape[3];
       if (! (ncdims[3] = dataFile.add_dim("d3", shape[3])) )
            throw DataException("Error - DataExpanded:: appending ncdimension 3 to netCDF file failed.");
   }
   dims[rank]=getFunctionSpace().getNumDataPointsPerSample();
   if (! (ncdims[rank] = dataFile.add_dim("num_data_points_per_sample", dims[rank])) )
            throw DataException("Error - DataExpanded:: appending num_data_points_per_sample to netCDF file failed.");
   dims[rank+1]=getFunctionSpace().getNumSamples();
   if (! (ncdims[rank+1] = dataFile.add_dim("num_samples", dims[rank+1])) )
            throw DataException("Error - DataExpanded:: appending num_sample to netCDF file failed.");

   if (getFunctionSpace().getNumSamples()>0)
   {

     // write the sample reference ids, then the data payload itself
     if (! ( ids = dataFile.add_var("id", ncInt, ncdims[rank+1])) )
        throw DataException("Error - DataExpanded:: appending reference id to netCDF file failed.");
     const int* ids_p=getFunctionSpace().borrowSampleReferenceIDs();
     if (! (ids->put(ids_p,dims[rank+1])) )
        throw DataException("Error - DataExpanded:: copy reference id  to netCDF buffer failed.");
     if (! ( var = dataFile.add_var("data", ncDouble, ndims, ncdims)) )
        throw DataException("Error - DataExpanded:: appending variable to netCDF file failed.");
     if (! (var->put(d_ptr,dims)) )
        throw DataException("Error - DataExpanded:: copy data to netCDF buffer failed.");
   }
#ifdef ESYS_MPI
   /* pass the token on to the next rank */
   if (mpi_iam<mpi_num-1) MPI_Send(&ndims, 0, MPI_INT, mpi_iam+1, 81801, MPI_COMM_WORLD);
#endif
   #else
   throw DataException("Error - DataExpanded:: dump is not configured with netCDF. Please contact your installation manager.");
   #endif
}
767
// Overwrite the data of every sample whose tag equals tagKey with the value
// starting at 'value[dataOffset]'.
// Throws DataException if value.size() does not equal the number of values
// per data point.
void
DataExpanded::setTaggedValue(int tagKey,
 	       const DataTypes::ShapeType& pointshape,
               const DataTypes::ValueType& value,
	       int dataOffset)
{
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int sampleNo,dataPointNo, i;
  DataTypes::ValueType::size_type n = getNoValues();
  double* p;
  const double* in=&value[0+dataOffset];

  // NOTE(review): the check below requires value.size()==n, yet reads begin
  // at dataOffset; a nonzero dataOffset with a vector of size n would read
  // past the end -- confirm callers always pass dataOffset==0 here.
  if (value.size() != n) {
    throw DataException("Error - DataExpanded::setTaggedValue: number of input values does not match number of values per data points.");
  }

  #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    if (getFunctionSpace().getTagFromSampleNo(sampleNo) == tagKey ) {
      for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
        p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
        for (i=0; i<n ;++i) p[i]=in[i];
      }
    }
  }
}
796
797
798 void
799 DataExpanded::reorderByReferenceIDs(int *reference_ids)
800 {
801 CHECK_FOR_EX_WRITE
802 int numSamples = getNumSamples();
803 DataTypes::ValueType::size_type n = getNoValues() * getNumDPPSample();
804 int sampleNo, sampleNo2,i;
805 double* p,*p2;
806 register double rtmp;
807 FunctionSpace fs=getFunctionSpace();
808
809 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
810 const int id_in=reference_ids[sampleNo];
811 const int id=fs.getReferenceIDOfSample(sampleNo);
812 if (id!=id_in) {
813 bool matched=false;
814 for (sampleNo2 = sampleNo+1; sampleNo2 < numSamples; sampleNo2++) {
815 if (id == reference_ids[sampleNo2]) {
816 p=&(m_data[getPointOffset(sampleNo,0)]);
817 p2=&(m_data[getPointOffset(sampleNo2,0)]);
818 for (i=0; i<n ;i++) {
819 rtmp=p[i];
820 p[i]=p2[i];
821 p2[i]=rtmp;
822 }
823 reference_ids[sampleNo]=id;
824 reference_ids[sampleNo2]=id_in;
825 matched=true;
826 break;
827 }
828 }
829 if (! matched) {
830 throw DataException("Error - DataExpanded::reorderByReferenceIDs: unable to reorder sample data by reference ids");
831 }
832 }
833 }
834 }
835
// Writable access to the underlying data vector; fails (via the guard) if
// the data is shared with another Data object.
DataTypes::ValueType&
DataExpanded::getVectorRW()
{
    CHECK_FOR_EX_WRITE
    return m_data.getData();
}
842
// Read-only access to the underlying data vector.
const DataTypes::ValueType&
DataExpanded::getVectorRO() const
{
    return m_data.getData();
}
848
849
850 #ifndef MKLRANDOM
namespace {

// Base generator: used only to produce seeds for the per-thread generators.
boost::mt19937 base;
// One generator per OpenMP thread so threads never share RNG state.
vector<boost::mt19937> gens;


// (Re)seed the per-thread generators from 'seed'.  A seed of 0 means
// "keep current state": the generator array is still allocated on first
// call, but not reseeded.
void seedGens(long seed)
{
#ifdef _OPENMP
    int numthreads=omp_get_max_threads();
#else
    int numthreads=1;
#endif
    if (gens.size()==0)		// we haven't instantiated the generators yet
    {
        gens.resize(numthreads);	// yes this means all the generators will be owned by one thread in a NUMA sense
    }				// I don't think it is worth a more complicated solution at this point
    if (seed!=0)
    {
       base.seed(seed);	// reseed the base generator
       for (int i=0;i<numthreads;++i)
       {
            gens[i].seed(base());	// initialise each generator with successive random values
       }
    }
}


}
880 #endif
881
882 // Idea here is to create an array of seeds by feeding the original seed into the random generator
883 // The code at the beginning of the function to compute the seed if one is given is
884 // just supposed to introduce some variety (and ensure that multiple ranks don't get the same seed).
885 // I make no claim about how well these initial seeds are distributed
886 void DataExpanded::randomFill(long seed)
887 {
888 CHECK_FOR_EX_WRITE
889 static unsigned prevseed=0; // So if we create a bunch of objects we don't get the same start seed
890 if (seed==0) // for each one
891 {
892 if (prevseed==0)
893 {
894 time_t s=time(0);
895 seed=s;
896 }
897 else
898 {
899 seed=prevseed+419; // these numbers are arbitrary
900 if (seed>3040101) // I want to avoid overflow on 32bit systems
901 {
902 seed=((int)(seed)%0xABCD)+1;
903 }
904 }
905 }
906 // now we need to consider MPI since we don't want each rank to start with the same seed
907 seed+=getFunctionSpace().getDomain()->getMPIRank()*getFunctionSpace().getDomain()->getMPISize()*3;
908 prevseed=seed;
909
910 #ifdef MKLRANDOM
911
912 #ifdef _OPENMP
913 int numthreads=omp_get_max_threads();
914 #else
915 int numthreads=1;
916 #endif
917 double* seeds=new double[numthreads];
918 VSLStreamStatePtr sstream;
919
920 int status=vslNewStream(&sstream, VSL_BRNG_MT19937, seed); // use a Mersenne Twister
921 numeric_limits<double> dlim;
922 vdRngUniform(VSL_METHOD_DUNIFORM_STD, sstream , numthreads, seeds, -1, 1);
923 vslDeleteStream(&sstream);
924 DataVector& dv=getVectorRW();
925 size_t dvsize=dv.size();
926 #pragma omp parallel
927 {
928 int tnum=0;
929 #ifdef _OPENMP
930 tnum=omp_get_thread_num();
931 #endif
932 VSLStreamStatePtr stream;
933 // the 12345 is a hack to give us a better chance of getting different integer seeds.
934 int status=vslNewStream(&stream, VSL_BRNG_MT19937, seeds[tnum]*12345); // use a Mersenne Twister
935 int bigchunk=(dvsize/numthreads+1);
936 int smallchunk=dvsize-bigchunk*(numthreads-1);
937 int chunksize=(tnum<(numthreads-1))?bigchunk:smallchunk;
938 vdRngUniform(VSL_METHOD_DUNIFORM_STD, stream, chunksize, &(dv[bigchunk*tnum]), 0,1);
939 vslDeleteStream(&stream);
940 }
941 delete[] seeds;
942 #else
943 boost::mt19937::result_type RMAX=base.max();
944 seedGens(seed);
945 DataVector& dv=getVectorRW();
946 long i;
947 const size_t dvsize=dv.size();
948
949 #pragma omp parallel private(i)
950 {
951 int tnum=0;
952 #ifdef _OPENMP
953 tnum=omp_get_thread_num();
954 #endif
955 boost::mt19937& generator=gens[tnum];
956
957 #pragma omp for schedule(static)
958 for (i=0;i<dvsize;++i)
959 {
960
961
962
963
964
965 #ifdef _WIN32
966 dv[i]=((double)generator())/RMAX;
967 #else
968 dv[i]=((double)generator())/RMAX;
969 #endif
970 }
971 }
972 #endif
973 }
974
975 } // end of namespace

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26