/[escript]/trunk/escript/src/DataExpanded.cpp
ViewVC logotype

Contents of /trunk/escript/src/DataExpanded.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3506 - (show annotations)
Wed May 11 01:59:45 2011 UTC (8 years, 5 months ago) by jfenwick
File size: 31550 byte(s)
Use VSL if MKLRANDOM is defined.
Use different seeds for different ranks/threads.
Use different seeds for successive calls with no seed given.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2010 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 #include "Data.h"
16 #include "DataExpanded.h"
17 #include "DataException.h"
18 #include "DataConstant.h"
19 #include "DataTagged.h"
20 #include <limits>
21
22 #include "esysUtils/Esys_MPI.h"
23
24 #ifdef USE_NETCDF
25 #include <netcdfcpp.h>
26 #endif
27
28 #include <boost/python/extract.hpp>
29 #include "DataMaths.h"
30
31 //#define MKLRANDOM
32
33 #ifdef MKLRANDOM
34 #include <mkl_vsl.h>
35
36 #endif
37
38 using namespace std;
39 using namespace boost::python;
40 using namespace boost;
41 using namespace escript::DataTypes;
42
43
// Guard used by every mutating member: refuse to modify a DataExpanded whose
// underlying storage is shared with other Data objects, reporting the source
// location of the offending write.
// BUG FIX: the previous definition dereferenced a null pointer
// (*((int*)0)=17;) immediately before the throw - a crash-on-purpose
// debugging leftover that made the exception unreachable. Removed so callers
// actually receive the DataException.
#define CHECK_FOR_EX_WRITE if (!checkNoSharing()) {std::ostringstream ss; ss << " Attempt to modify shared object. line " << __LINE__ << " of " << __FILE__; throw DataException(ss.str());}
47
48 namespace escript {
49
// Construct an expanded Data object over function space 'what', replicating
// the single value wrapped in 'value' into every data point.
DataExpanded::DataExpanded(const WrappedArray& value,
                           const FunctionSpace& what)
  : parent(what,value.getShape())
{
  //
  // allocate storage: one data point per (sample, point-within-sample) pair
  initialise(what.getNumSamples(),what.getNumDPPSample());
  //
  // copy the given value to every data point
  copy(value);
}
61
// Copy constructor: shares function space/shape via the parent and copies
// the whole data block of 'other'.
DataExpanded::DataExpanded(const DataExpanded& other)
  : parent(other.getFunctionSpace(), other.getShape()),
  m_data(other.m_data)
{
}
67
// Promote a DataConstant to expanded form: the constant's single value is
// copied into every data point.
DataExpanded::DataExpanded(const DataConstant& other)
  : parent(other.getFunctionSpace(), other.getShape())
{
  //
  // initialise the data array for this object
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // DataConstant only has one value, copy this to every data point
  copy(other);
}
78
// Promote a DataTagged object to expanded form: each data point of this
// object receives the value that 'other' associates with its sample.
DataExpanded::DataExpanded(const DataTagged& other)
  : parent(other.getFunctionSpace(), other.getShape())
{
  //
  // initialise the data array for this object
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // for each data point in this object, extract and copy the corresponding data
  // value from the given DataTagged object
  int i,j;
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      try {
        DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(),
                             other.getVectorRO(),
                             other.getPointOffset(i,j));
      }
      // NOTE(review): exceptions may not escape an OpenMP region, so any
      // copy failure is printed and swallowed here - that data point is
      // simply left unset.
      catch (std::exception& e) {
        cout << e.what() << endl;
      }
    }
  }
}
105
// Slice constructor: build a new expanded object whose shape is the result
// of slicing 'region' out of every data point of 'other'.
DataExpanded::DataExpanded(const DataExpanded& other,
                           const DataTypes::RegionType& region)
  : parent(other.getFunctionSpace(),DataTypes::getResultSliceShape(region))
{
  //
  // get the shape of the slice
  // DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
  //
  // initialise this Data object to the shape of the slice
  initialise(other.getNumSamples(),other.getNumDPPSample());
  //
  // copy the sliced region of every data point of 'other' into the
  // corresponding data point of this object
  DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  int i,j;
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      try {
        DataTypes::copySlice(getVectorRW(),getShape(),getPointOffset(i,j),
                             other.getVectorRO(),
                             other.getShape(),
                             other.getPointOffset(i,j),
                             region_loop_range);
      }
      // NOTE(review): exceptions may not escape an OpenMP region, so copy
      // failures are printed and swallowed; the point is left unset.
      catch (std::exception& e) {
        cout << e.what() << endl;
      }
    }
  }
}
138
// Construct from a flat data vector. If 'data' holds exactly one data point
// (getNoValues() entries) it is replicated across all samples/points;
// otherwise 'data' is assumed to already contain one value per data point
// and is copied in wholesale.
DataExpanded::DataExpanded(const FunctionSpace& what,
                           const DataTypes::ShapeType &shape,
                           const DataTypes::ValueType &data)
  : parent(what,shape)
{
  EsysAssert(data.size()%getNoValues()==0,
                 "DataExpanded Constructor - size of supplied data is not a multiple of shape size.");

  if (data.size()==getNoValues())
  {
    // reference taken before initialise() resizes the vector; safe because
    // it binds to the vector object itself, not its internal buffer
    ValueType& vec=m_data.getData();
    //
    // create the view of the data
    initialise(what.getNumSamples(),what.getNumDPPSample());
    // now we copy this value to all elements
    for (int i=0;i<getLength();)
    {
      for (unsigned int j=0;j<getNoValues();++j,++i)
      {
        vec[i]=data[j];
      }
    }
  }
  else
  {
    //
    // copy the data in the correct format
    // NOTE(review): this branch never calls initialise(), so m_data's
    // row/column bookkeeping presumably comes from the vector assignment
    // alone - confirm against DataBlocks2D before relying on
    // getNumRows()/getNumCols() for objects built through this path.
    m_data.getData()=data;
  }


}
171
172 DataExpanded::DataExpanded(const FunctionSpace& what,
173 const DataTypes::ShapeType &shape,
174 const double v)
175 : parent(what,shape)
176 {
177 ValueType& vec=m_data.getData();
178 //
179 // create the view of the data
180 initialise(what.getNumSamples(),what.getNumDPPSample());
181 // now we copy this value to all elements
182 const int L=getLength();
183 int i;
184 #pragma omp parallel for schedule(static) private(i)
185 for (i=0;i<L;++i)
186 {
187 vec[i]=v;
188 }
189 }
190
191
192
// Destructor: nothing to do, m_data releases its own storage.
DataExpanded::~DataExpanded()
{
}
196
// Return a freshly allocated copy of this object; caller owns the result.
DataAbstract*
DataExpanded::deepCopy()
{
  return new DataExpanded(*this);
}
202
203
// Return a new DataExpanded holding the given slice of every data point;
// caller owns the result.
DataAbstract*
DataExpanded::getSlice(const DataTypes::RegionType& region) const
{
  return new DataExpanded(*this,region);
}
209
// Overwrite the region 'region' of every data point of this object with the
// corresponding data point of '*value' (which must be a DataExpanded whose
// shape matches the slice). Throws on cast failure, shared storage, or
// rank/shape mismatch.
void
DataExpanded::setSlice(const DataAbstract* value,
                       const DataTypes::RegionType& region)
{
  const DataExpanded* tempDataExp=dynamic_cast<const DataExpanded*>(value);
  if (tempDataExp==0) {
    throw DataException("Programming error - casting to DataExpanded.");
  }
  CHECK_FOR_EX_WRITE
  //
  // get shape of slice
  DataTypes::ShapeType shape(DataTypes::getResultSliceShape(region));
  DataTypes::RegionLoopRangeType region_loop_range=DataTypes::getSliceRegionLoopRange(region);
  //
  // check rank of this object against the region, and the slice shape
  // against the incoming value (rank-0 values need no shape check)
  if (getRank()!=region.size()) {
    throw DataException("Error - Invalid slice region.");
  }
  if (tempDataExp->getRank()>0 && !DataTypes::checkShape(value->getShape(), shape)) {
    throw DataException (DataTypes::createShapeErrorMessage(
                "Error - Couldn't copy slice due to shape mismatch.",shape, value->getShape()));
  }
  //
  // copy the data from the slice into this object; loop-invariant vectors
  // and shapes are hoisted so the parallel loop does minimal work per point
  DataTypes::ValueType::size_type numRows=m_data.getNumRows();
  DataTypes::ValueType::size_type numCols=m_data.getNumCols();
  int i, j;
  ValueType& vec=getVectorRW();
  const ShapeType& mshape=getShape();
  const ValueType& tVec=tempDataExp->getVectorRO();
  const ShapeType& tShape=tempDataExp->getShape();
  #pragma omp parallel for private(i,j) schedule(static)
  for (i=0;i<numRows;i++) {
    for (j=0;j<numCols;j++) {
      DataTypes::copySliceFrom(vec,mshape,getPointOffset(i,j),
                               tVec,
                               tShape,
                               tempDataExp->getPointOffset(i,j),
                               region_loop_range);

    }
  }
}
253
254 void
255 DataExpanded::copy(const DataConstant& value)
256 {
257 EsysAssert((checkShape(getShape(), value.getShape())),
258 createShapeErrorMessage("Error - Couldn't copy due to shape mismatch.",value.getShape(),getShape()));
259
260 //
261 // copy a single value to every data point in this object
262 int nRows=m_data.getNumRows();
263 int nCols=m_data.getNumCols();
264 int i,j;
265 #pragma omp parallel for private(i,j) schedule(static)
266 for (i=0;i<nRows;i++) {
267 for (j=0;j<nCols;j++) {
268 DataTypes::copyPoint(getVectorRW(), getPointOffset(i,j), getNoValues(), value.getVectorRO(), 0);
269 }
270 }
271 }
272
// Fill every data point of this object from the wrapped array 'value';
// throws DataException on shape mismatch.
void
DataExpanded::copy(const WrappedArray& value)
{
  // check the input shape matches this shape
  if (!DataTypes::checkShape(getShape(),value.getShape())) {
    throw DataException(DataTypes::createShapeErrorMessage(
                 "Error - (DataExpanded) Cannot copy due to shape mismatch.",
                 value.getShape(),getShape()));
  }
  // one copy of 'value' for each of the numSamples*numDPPSample data points
  getVectorRW().copyFromArray(value, getNumDPPSample()*getNumSamples());
}
284
285
286 void
287 DataExpanded::initialise(int noSamples,
288 int noDataPointsPerSample)
289 {
290 if (noSamples==0) //retain the default empty object
291 {
292 return;
293 }
294 //
295 // resize data array to the required size
296 m_data.resize(noSamples,noDataPointsPerSample,getNoValues());
297 }
298
299 bool
300 DataExpanded::hasNaN() const
301 {
302 const ValueType& v=m_data.getData();
303 for (ValueType::size_type i=0;i<v.size();++i)
304 {
305 if (nancheck(v[i]))
306 {
307 return true;
308 }
309 }
310 return false;
311 }
312
313
314 string
315 DataExpanded::toString() const
316 {
317 stringstream temp;
318 FunctionSpace fs=getFunctionSpace();
319
320 int offset=0;
321 for (int i=0;i<m_data.getNumRows();i++) {
322 for (int j=0;j<m_data.getNumCols();j++) {
323 offset=getPointOffset(i,j);
324 stringstream suffix;
325 suffix << "( id: " << i << ", ref: " << fs.getReferenceIDOfSample(i) << ", pnt: " << j << ")";
326 temp << DataTypes::pointToString(getVectorRO(),getShape(),offset,suffix.str());
327 if (!(i==(m_data.getNumRows()-1) && j==(m_data.getNumCols()-1))) {
328 temp << endl;
329 }
330 }
331 }
332 string result=temp.str();
333 if (result.empty())
334 {
335 return "(data contains no samples)\n";
336 }
337 return temp.str();
338 }
339
// Offset of data point (sampleNo,dataPointNo) within the flat data vector.
DataTypes::ValueType::size_type
DataExpanded::getPointOffset(int sampleNo,
                             int dataPointNo) const
{
  return m_data.index(sampleNo,dataPointNo);
}
346
// Non-const overload of getPointOffset; identical computation.
DataTypes::ValueType::size_type
DataExpanded::getPointOffset(int sampleNo,
                             int dataPointNo)
{
  return m_data.index(sampleNo,dataPointNo);
}
353
// Total number of doubles stored (samples * points-per-sample * values-per-point).
DataTypes::ValueType::size_type
DataExpanded::getLength() const
{
  return m_data.size();
}
359
360
361
362 void
363 DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const double value) {
364 CHECK_FOR_EX_WRITE
365 //
366 // Get the number of samples and data-points per sample.
367 int numSamples = getNumSamples();
368 int numDataPointsPerSample = getNumDPPSample();
369 int dataPointRank = getRank();
370 ShapeType dataPointShape = getShape();
371 if (numSamples*numDataPointsPerSample > 0) {
372 //TODO: global error handling
373 if ((sampleNo >= numSamples) || (sampleNo < 0 )) {
374 throw DataException("Error - DataExpanded::copyDataPoint invalid sampleNo.");
375 }
376 if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
377 throw DataException("Error - DataExpanded::copyDataPoint invalid dataPointNoInSample.");
378 }
379 ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
380 ValueType& vec=getVectorRW();
381 if (dataPointRank==0) {
382 vec[offset]=value;
383 } else if (dataPointRank==1) {
384 for (int i=0; i<dataPointShape[0]; i++) {
385 vec[offset+i]=value;
386 }
387 } else if (dataPointRank==2) {
388 for (int i=0; i<dataPointShape[0]; i++) {
389 for (int j=0; j<dataPointShape[1]; j++) {
390 vec[offset+getRelIndex(dataPointShape,i,j)]=value;
391 }
392 }
393 } else if (dataPointRank==3) {
394 for (int i=0; i<dataPointShape[0]; i++) {
395 for (int j=0; j<dataPointShape[1]; j++) {
396 for (int k=0; k<dataPointShape[2]; k++) {
397 vec[offset+getRelIndex(dataPointShape,i,j,k)]=value;
398 }
399 }
400 }
401 } else if (dataPointRank==4) {
402 for (int i=0; i<dataPointShape[0]; i++) {
403 for (int j=0; j<dataPointShape[1]; j++) {
404 for (int k=0; k<dataPointShape[2]; k++) {
405 for (int l=0; l<dataPointShape[3]; l++) {
406 vec[offset+getRelIndex(dataPointShape,i,j,k,l)]=value;
407 }
408 }
409 }
410 }
411 }
412 }
413 }
414
// Copy the wrapped array 'value' into the single data point
// (sampleNo,dataPointNo). Throws DataException on rank mismatch or
// out-of-range indices; a completely empty object is silently ignored.
void
DataExpanded::copyToDataPoint(const int sampleNo, const int dataPointNo, const WrappedArray& value) {
  CHECK_FOR_EX_WRITE
  //
  // Get the number of samples and data-points per sample.
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  //
  // check rank:
  if (value.getRank()!=getRank())
    throw DataException("Rank of value does not match Data object rank");
  if (numSamples*numDataPointsPerSample > 0) {
    //TODO: global error handling
    if ((sampleNo >= numSamples) || (sampleNo < 0 )) {
      throw DataException("Error - DataExpanded::copyDataPoint invalid sampleNo.");
    }
    if ((dataPointNo >= numDataPointsPerSample) || (dataPointNo < 0)) {
      throw DataException("Error - DataExpanded::copyDataPoint invalid dataPointNoInSample.");
    }
    // copy exactly one data point's worth of values at the point's offset
    ValueType::size_type offset = getPointOffset(sampleNo, dataPointNo);
    ValueType& vec=getVectorRW();
    vec.copyFromArrayToOffset(value,offset,1);
  }
}
439
440 void
441 DataExpanded::symmetric(DataAbstract* ev)
442 {
443 int sampleNo,dataPointNo;
444 int numSamples = getNumSamples();
445 int numDataPointsPerSample = getNumDPPSample();
446 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
447 if (temp_ev==0) {
448 throw DataException("Error - DataExpanded::symmetric: casting to DataExpanded failed (propably a programming error).");
449 }
450 const ValueType& vec=getVectorRO();
451 const ShapeType& shape=getShape();
452 ValueType& evVec=temp_ev->getVectorRW();
453 const ShapeType& evShape=temp_ev->getShape();
454 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
455 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
456 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
457 DataMaths::symmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
458 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
459 }
460 }
461 }
462 void
463 DataExpanded::nonsymmetric(DataAbstract* ev)
464 {
465 int sampleNo,dataPointNo;
466 int numSamples = getNumSamples();
467 int numDataPointsPerSample = getNumDPPSample();
468 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
469 if (temp_ev==0) {
470 throw DataException("Error - DataExpanded::nonsymmetric: casting to DataExpanded failed (propably a programming error).");
471 }
472 const ValueType& vec=getVectorRO();
473 const ShapeType& shape=getShape();
474 ValueType& evVec=temp_ev->getVectorRW();
475 const ShapeType& evShape=temp_ev->getShape();
476 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
477 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
478 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
479 DataMaths::nonsymmetric(vec,shape,getPointOffset(sampleNo,dataPointNo),
480 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
481 }
482 }
483 }
484 void
485 DataExpanded::trace(DataAbstract* ev, int axis_offset)
486 {
487 int sampleNo,dataPointNo;
488 int numSamples = getNumSamples();
489 int numDataPointsPerSample = getNumDPPSample();
490 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
491 if (temp_ev==0) {
492 throw DataException("Error - DataExpanded::trace: casting to DataExpanded failed (propably a programming error).");
493 }
494 const ValueType& vec=getVectorRO();
495 const ShapeType& shape=getShape();
496 ValueType& evVec=temp_ev->getVectorRW();
497 const ShapeType& evShape=temp_ev->getShape();
498 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
499 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
500 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
501 DataMaths::trace(vec,shape,getPointOffset(sampleNo,dataPointNo),
502 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
503 }
504 }
505 }
506
507 void
508 DataExpanded::transpose(DataAbstract* ev, int axis_offset)
509 {
510 int sampleNo,dataPointNo;
511 int numSamples = getNumSamples();
512 int numDataPointsPerSample = getNumDPPSample();
513 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
514 if (temp_ev==0) {
515 throw DataException("Error - DataExpanded::transpose: casting to DataExpanded failed (propably a programming error).");
516 }
517 const ValueType& vec=getVectorRO();
518 const ShapeType& shape=getShape();
519 ValueType& evVec=temp_ev->getVectorRW();
520 const ShapeType& evShape=temp_ev->getShape();
521 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
522 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
523 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
524 DataMaths::transpose(vec,shape,getPointOffset(sampleNo,dataPointNo),
525 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis_offset);
526 }
527 }
528 }
529
530 void
531 DataExpanded::swapaxes(DataAbstract* ev, int axis0, int axis1)
532 {
533 int sampleNo,dataPointNo;
534 int numSamples = getNumSamples();
535 int numDataPointsPerSample = getNumDPPSample();
536 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
537 if (temp_ev==0) {
538 throw DataException("Error - DataExpanded::swapaxes: casting to DataExpanded failed (propably a programming error).");
539 }
540 const ValueType& vec=getVectorRO();
541 const ShapeType& shape=getShape();
542 ValueType& evVec=temp_ev->getVectorRW();
543 const ShapeType& evShape=temp_ev->getShape();
544 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
545 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
546 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
547 DataMaths::swapaxes(vec,shape,getPointOffset(sampleNo,dataPointNo),
548 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),axis0,axis1);
549 }
550 }
551 }
552 void
553 DataExpanded::eigenvalues(DataAbstract* ev)
554 {
555 int sampleNo,dataPointNo;
556 int numSamples = getNumSamples();
557 int numDataPointsPerSample = getNumDPPSample();
558 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
559 if (temp_ev==0) {
560 throw DataException("Error - DataExpanded::eigenvalues: casting to DataExpanded failed (propably a programming error).");
561 }
562 const ValueType& vec=getVectorRO();
563 const ShapeType& shape=getShape();
564 ValueType& evVec=temp_ev->getVectorRW();
565 const ShapeType& evShape=temp_ev->getShape();
566 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
567 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
568 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
569 DataMaths::eigenvalues(vec,shape,getPointOffset(sampleNo,dataPointNo),
570 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo));
571 }
572 }
573 }
574 void
575 DataExpanded::eigenvalues_and_eigenvectors(DataAbstract* ev,DataAbstract* V,const double tol)
576 {
577 int numSamples = getNumSamples();
578 int numDataPointsPerSample = getNumDPPSample();
579 int sampleNo,dataPointNo;
580 DataExpanded* temp_ev=dynamic_cast<DataExpanded*>(ev);
581 if (temp_ev==0) {
582 throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (propably a programming error).");
583 }
584 DataExpanded* temp_V=dynamic_cast<DataExpanded*>(V);
585 if (temp_V==0) {
586 throw DataException("Error - DataExpanded::eigenvalues_and_eigenvectors: casting to DataExpanded failed (propably a programming error).");
587 }
588 const ValueType& vec=getVectorRO();
589 const ShapeType& shape=getShape();
590 ValueType& evVec=temp_ev->getVectorRW();
591 const ShapeType& evShape=temp_ev->getShape();
592 ValueType& VVec=temp_V->getVectorRW();
593 const ShapeType& VShape=temp_V->getShape();
594 #pragma omp parallel for private(sampleNo,dataPointNo) schedule(static)
595 for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
596 for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
597 DataMaths::eigenvalues_and_eigenvectors(vec,shape,getPointOffset(sampleNo,dataPointNo),
598 evVec,evShape,ev->getPointOffset(sampleNo,dataPointNo),
599 VVec, VShape,V->getPointOffset(sampleNo,dataPointNo),tol);
600 }
601 }
602 }
603
604
605 int
606 DataExpanded::matrixInverse(DataAbstract* out) const
607 {
608 DataExpanded* temp=dynamic_cast<DataExpanded*>(out);
609 if (temp==0)
610 {
611 throw DataException("Error - DataExpanded::matrixInverse: casting to DataExpanded failed (propably a programming error).");
612 }
613
614 if (getRank()!=2)
615 {
616 throw DataException("Error - DataExpanded::matrixInverse: input must be rank 2.");
617
618 }
619 int sampleNo;
620 const int numdpps=getNumDPPSample();
621 const int numSamples = getNumSamples();
622 const ValueType& vec=m_data.getData();
623 int errcode=0;
624 #pragma omp parallel private(sampleNo)
625 {
626 int errorcode=0;
627 LapackInverseHelper h(getShape()[0]);
628 #pragma omp for schedule(static)
629 for (sampleNo = 0; sampleNo < numSamples; sampleNo++)
630 {
631 // not sure I like all those virtual calls to getPointOffset
632 DataTypes::ValueType::size_type offset=getPointOffset(sampleNo,0);
633 int res=DataMaths::matrix_inverse(vec, getShape(), offset, temp->getVectorRW(), temp->getShape(), offset, numdpps, h);
634 if (res>errorcode)
635 {
636 errorcode=res;
637 #pragma omp critical
638 {
639 errcode=errorcode; // I'm not especially concerned which error gets reported as long as one is
640 }
641 }
642 }
643 }
644 return errcode;
645 if (errcode)
646 {
647 DataMaths::matrixInverseError(errcode); // throws exceptions
648 }
649 }
650
// Zero every entry of every data point.
void
DataExpanded::setToZero(){
  // TODO: Surely there is a more efficient way to do this????
  // Why is there no memset here? Parallel issues?
  // A: This ensures that memory is touched by the correct thread.
  //    (first-touch page placement: each OpenMP thread zeroes the samples
  //    it will later work on - do not collapse this into a single memset)
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  DataTypes::ValueType::size_type n = getNoValues();
  double* p;
  int sampleNo,dataPointNo, i;
  #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
      p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
      for (i=0; i<n ;++i) p[i]=0.;
    }
  }
}
670
/* Append a ".XXXX" MPI-rank suffix to fileName when running with more than
 * one MPI process; with a single process the name is returned unchanged.
 * The caller owns (and must free()) the returned buffer; returns NULL if
 * allocation fails. Rewritten to use bounded snprintf instead of
 * strncpy+sprintf. */
char *Escript_MPI_appendRankToFileName(const char *fileName, int mpi_size, int mpi_rank) {
  /* Make plenty of room for the mpi_rank number and terminating '\0' */
  const size_t buflen = strlen(fileName)+20;
  char *newFileName = (char *)malloc(buflen);
  if (newFileName == NULL) {
    return NULL;  /* out of memory - let the caller's file open fail visibly */
  }
  if (mpi_size>1) {
    snprintf(newFileName, buflen, "%s.%04d", fileName, mpi_rank);
  } else {
    snprintf(newFileName, buflen, "%s", fileName);
  }
  return(newFileName);
}
679
680 void
681 DataExpanded::dump(const std::string fileName) const
682 {
683 #ifdef USE_NETCDF
684 const int ldims=2+DataTypes::maxRank;
685 const NcDim* ncdims[ldims];
686 NcVar *var, *ids;
687 int rank = getRank();
688 int type= getFunctionSpace().getTypeCode();
689 int ndims =0;
690 long dims[ldims];
691 const double* d_ptr=&(m_data[0]);
692 const DataTypes::ShapeType& shape = getShape();
693 int mpi_iam=getFunctionSpace().getDomain()->getMPIRank();
694 int mpi_num=getFunctionSpace().getDomain()->getMPISize();
695 #ifdef ESYS_MPI
696 MPI_Status status;
697 #endif
698
699 #ifdef ESYS_MPI
700 /* Serialize NetCDF I/O */
701 if (mpi_iam>0) MPI_Recv(&ndims, 0, MPI_INT, mpi_iam-1, 81801, MPI_COMM_WORLD, &status);
702 #endif
703
704 // netCDF error handler
705 NcError err(NcError::verbose_nonfatal);
706 // Create the file.
707 char *newFileName = Escript_MPI_appendRankToFileName(fileName.c_str(), mpi_num, mpi_iam);
708 NcFile dataFile(newFileName, NcFile::Replace);
709 // check if writing was successful
710 if (!dataFile.is_valid())
711 throw DataException("Error - DataExpanded:: opening of netCDF file for output failed.");
712 if (!dataFile.add_att("type_id",2) )
713 throw DataException("Error - DataExpanded:: appending data type to netCDF file failed.");
714 if (!dataFile.add_att("rank",rank) )
715 throw DataException("Error - DataExpanded:: appending rank attribute to netCDF file failed.");
716 if (!dataFile.add_att("function_space_type",type))
717 throw DataException("Error - DataExpanded:: appending function space attribute to netCDF file failed.");
718 ndims=rank+2;
719 if ( rank >0 ) {
720 dims[0]=shape[0];
721 if (! (ncdims[0] = dataFile.add_dim("d0",shape[0])) )
722 throw DataException("Error - DataExpanded:: appending ncdimension 0 to netCDF file failed.");
723 }
724 if ( rank >1 ) {
725 dims[1]=shape[1];
726 if (! (ncdims[1] = dataFile.add_dim("d1",shape[1])) )
727 throw DataException("Error - DataExpanded:: appending ncdimension 1 to netCDF file failed.");
728 }
729 if ( rank >2 ) {
730 dims[2]=shape[2];
731 if (! (ncdims[2] = dataFile.add_dim("d2", shape[2])) )
732 throw DataException("Error - DataExpanded:: appending ncdimension 2 to netCDF file failed.");
733 }
734 if ( rank >3 ) {
735 dims[3]=shape[3];
736 if (! (ncdims[3] = dataFile.add_dim("d3", shape[3])) )
737 throw DataException("Error - DataExpanded:: appending ncdimension 3 to netCDF file failed.");
738 }
739 dims[rank]=getFunctionSpace().getNumDataPointsPerSample();
740 if (! (ncdims[rank] = dataFile.add_dim("num_data_points_per_sample", dims[rank])) )
741 throw DataException("Error - DataExpanded:: appending num_data_points_per_sample to netCDF file failed.");
742 dims[rank+1]=getFunctionSpace().getNumSamples();
743 if (! (ncdims[rank+1] = dataFile.add_dim("num_samples", dims[rank+1])) )
744 throw DataException("Error - DataExpanded:: appending num_sample to netCDF file failed.");
745
746 if (getFunctionSpace().getNumSamples()>0)
747 {
748
749 if (! ( ids = dataFile.add_var("id", ncInt, ncdims[rank+1])) )
750 throw DataException("Error - DataExpanded:: appending reference id to netCDF file failed.");
751 const int* ids_p=getFunctionSpace().borrowSampleReferenceIDs();
752 if (! (ids->put(ids_p,dims[rank+1])) )
753 throw DataException("Error - DataExpanded:: copy reference id to netCDF buffer failed.");
754 if (! ( var = dataFile.add_var("data", ncDouble, ndims, ncdims)) )
755 throw DataException("Error - DataExpanded:: appending variable to netCDF file failed.");
756 if (! (var->put(d_ptr,dims)) )
757 throw DataException("Error - DataExpanded:: copy data to netCDF buffer failed.");
758 }
759 #ifdef ESYS_MPI
760 if (mpi_iam<mpi_num-1) MPI_Send(&ndims, 0, MPI_INT, mpi_iam+1, 81801, MPI_COMM_WORLD);
761 #endif
762 #else
763 throw DataException("Error - DataExpanded:: dump is not configured with netCDF. Please contact your installation manager.");
764 #endif
765 }
766
// For every sample whose tag equals tagKey, overwrite all of its data points
// with the single point value found at 'dataOffset' inside 'value'.
// NOTE(review): the size check compares value.size() against getNoValues(),
// so dataOffset is presumably always 0 here - confirm against callers.
void
DataExpanded::setTaggedValue(int tagKey,
                             const DataTypes::ShapeType& pointshape,
                             const DataTypes::ValueType& value,
                             int dataOffset)
{
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  int numDataPointsPerSample = getNumDPPSample();
  int sampleNo,dataPointNo, i;
  DataTypes::ValueType::size_type n = getNoValues();
  double* p;
  const double* in=&value[0+dataOffset];

  if (value.size() != n) {
    throw DataException("Error - DataExpanded::setTaggedValue: number of input values does not match number of values per data points.");
  }

  #pragma omp parallel for private(sampleNo,dataPointNo,p,i) schedule(static)
  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    if (getFunctionSpace().getTagFromSampleNo(sampleNo) == tagKey ) {
      for (dataPointNo = 0; dataPointNo < numDataPointsPerSample; dataPointNo++) {
        p=&(m_data[getPointOffset(sampleNo,dataPointNo)]);
        for (i=0; i<n ;++i) p[i]=in[i];
      }
    }
  }
}
795
796
// Permute whole samples in place so that the stored order matches the order
// given by 'reference_ids' (which is updated to reflect the swaps made).
// O(n^2) worst case: for each out-of-place sample a linear search finds its
// swap partner further down the array. Throws when a required id is absent.
void
DataExpanded::reorderByReferenceIDs(int *reference_ids)
{
  CHECK_FOR_EX_WRITE
  int numSamples = getNumSamples();
  // a sample is numDPPSample points of getNoValues() doubles each
  DataTypes::ValueType::size_type n = getNoValues() * getNumDPPSample();
  int sampleNo, sampleNo2,i;
  double* p,*p2;
  register double rtmp;
  FunctionSpace fs=getFunctionSpace();

  for (sampleNo = 0; sampleNo < numSamples; sampleNo++) {
    const int id_in=reference_ids[sampleNo];
    const int id=fs.getReferenceIDOfSample(sampleNo);
    if (id!=id_in) {
      bool matched=false;
      // find the sample currently holding 'id' and swap its data into place
      for (sampleNo2 = sampleNo+1; sampleNo2 < numSamples; sampleNo2++) {
        if (id == reference_ids[sampleNo2]) {
          p=&(m_data[getPointOffset(sampleNo,0)]);
          p2=&(m_data[getPointOffset(sampleNo2,0)]);
          for (i=0; i<n ;i++) {
            rtmp=p[i];
            p[i]=p2[i];
            p2[i]=rtmp;
          }
          // keep reference_ids consistent with the swapped data
          reference_ids[sampleNo]=id;
          reference_ids[sampleNo2]=id_in;
          matched=true;
          break;
        }
      }
      if (! matched) {
        throw DataException("Error - DataExpanded::reorderByReferenceIDs: unable to reorder sample data by reference ids");
      }
    }
  }
}
834
// Writable access to the flat data vector; refuses (throws via the macro)
// when the storage is shared with other Data objects.
DataTypes::ValueType&
DataExpanded::getVectorRW()
{
  CHECK_FOR_EX_WRITE
  return m_data.getData();
}
841
// Read-only access to the flat data vector; no sharing check needed.
const DataTypes::ValueType&
DataExpanded::getVectorRO() const
{
  return m_data.getData();
}
847
848
// Idea here is to create an array of seeds by feeding the original seed into the random generator
// The code at the beginning of the function to compute the seed if one is given is
// just supposed to introduce some variety (and ensure that multiple ranks don't get the same seed).
// I make no claim about how well these initial seeds are distributed
//
// Fill every entry of this object with pseudo-random values. With MKLRANDOM
// a VSL Mersenne Twister stream per thread generates uniform values in (0,1);
// otherwise rand_r with per-thread seeds is used. seed==0 means "pick one".
void DataExpanded::randomFill(long seed)
{
  CHECK_FOR_EX_WRITE
  // NOTE(review): this static is read and written without synchronization;
  // concurrent randomFill calls from different host threads would race on it.
  static unsigned prevseed=0; // So if we create a bunch of objects we don't get the same start seed
  if (seed==0)		// for each one
  {
    if (prevseed==0)
    {
      time_t s=time(0);
      seed=s;
    }
    else
    {
      seed=prevseed+419;	// these numbers are arbitrary
      if (seed>3040101)		// I want to avoid overflow on 32bit systems
      {
        seed=((int)(seed)%0xABCD)+1;
      }
    }
  }
  // now we need to consider MPI since we don't want each rank to start with the same seed
  seed+=getFunctionSpace().getDomain()->getMPIRank()*getFunctionSpace().getDomain()->getMPISize()*3;
  prevseed=seed;
#ifdef _OPENMP
  int numthreads=omp_get_max_threads();
#else
  int numthreads=1;
#endif

#ifdef MKLRANDOM
  // stage 1: one VSL stream produces a per-thread array of seed values
  double* seeds=new double[numthreads];
  VSLStreamStatePtr sstream;

  int status=vslNewStream(&sstream, VSL_BRNG_MT19937, seed);    // use a Mersenne Twister
  numeric_limits<double> dlim;
  vdRngUniform(VSL_METHOD_DUNIFORM_STD, sstream , numthreads, seeds, -1, 1);
  vslDeleteStream(&sstream);
  DataVector& dv=getVectorRW();
  size_t dvsize=dv.size();
  // stage 2: each thread fills its own contiguous chunk from its own stream
  #pragma omp parallel
  {
    int tnum=0;
#ifdef _OPENMP
    tnum=omp_get_thread_num();
#endif
    VSLStreamStatePtr stream;
    // the 12345 is a hack to give us a better chance of getting different integer seeds.
    int status=vslNewStream(&stream, VSL_BRNG_MT19937, seeds[tnum]*12345);    // use a Mersenne Twister
    // NOTE(review): for dvsize < numthreads, smallchunk goes negative and
    // dv[bigchunk*tnum] can index out of range - presumably expanded data is
    // never that small, but confirm before relying on this path.
    int bigchunk=(dvsize/numthreads+1);
    int smallchunk=dvsize-bigchunk*(numthreads-1);
    int chunksize=(tnum<(numthreads-1))?bigchunk:smallchunk;
    vdRngUniform(VSL_METHOD_DUNIFORM_STD, stream, chunksize, &(dv[bigchunk*tnum]), 0,1);
    vslDeleteStream(&stream);
  }
  delete[] seeds;
#else
  // non-MKL path: seed rand(), draw one seed per thread, then each thread
  // fills its static schedule share of the vector with rand_r
  srand(seed);
  unsigned* seeds=new unsigned[numthreads];
  for (int i=0;i<numthreads;++i)
  {
    seeds[i]=rand();
  }
  DataVector& dv=getVectorRW();
  long i;
  const size_t dvsize=dv.size();
  #pragma omp parallel private(i)
  {
    int tnum=0;
#ifdef _OPENMP
    tnum=omp_get_thread_num();
#endif
    unsigned info=seeds[tnum];

    #pragma omp for schedule(static)
    for (i=0;i<dvsize;++i)
    {
      dv[i]=(double)rand_r(&info)/RAND_MAX;
    }
  }
  delete[] seeds;
#endif
}
935
936 } // end of namespace

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26