Contents of /trunk/escript/src/Utils.cpp
Revision 4477
Thu Jun 20 02:41:28 2013 UTC by caltinay
File size: 13474 bytes
escript: use scoped_array instead of scoped_ptr
ripley: netcdf returns a copy of an array so use scoped_array
downunder: s/Sperical/Spherical/
scripts: added a few more suppressions


/*****************************************************************************
*
* Copyright (c) 2003-2013 by University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development since 2012 by School of Earth Sciences
*
*****************************************************************************/

#include <fstream>
#include <string.h>

// standard headers for printf/sprintf, atoi/system and DBL_EPSILON/DBL_MAX used below
#include <cfloat>
#include <cstdio>
#include <cstdlib>

// added for saveCSV
#include <boost/python.hpp>
#include <boost/scoped_array.hpp>
#include "Data.h"

#include "Utils.h"
#include "DataVector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef ESYS_MPI
#include <mpi.h>
#endif

#ifdef _WIN32
#include <WinSock2.h>
#else
#include <unistd.h>
#endif

namespace escript {

int getSvnVersion()
{
#ifdef SVN_VERSION
    return SVN_VERSION;
#else
    return 0;
#endif
}

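// Returns the CPU core the calling process is currently running on by
// reading /proc/<pid>/stat and parsing the fourth-from-last space-separated
// field, or -1 if CORE_ID1 is not defined or the file cannot be read.
// Linux-specific; only compiled in when CORE_ID1 is defined.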
/* This is probably not very robust, but it works on Savanna today and is useful for performance analysis */
int get_core_id() {
    int processor_num=-1;
#ifdef CORE_ID1
    FILE *fp;
    int i, count_spaces=0;
    char fname[100];
    char buf[1000];

    sprintf(fname, "/proc/%d/stat", getpid());
    fp = fopen(fname, "r");
    if (fp == NULL) return(-1);
    fgets(buf, 1000, fp);
    fclose(fp);

    for (i=strlen(buf)-1; i>=0; i--) {
        if (buf[i] == ' ') count_spaces++;
        if (count_spaces == 4) break;
    }
    processor_num = atoi(&buf[i+1]);
#endif
    return(processor_num);
}

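// Prints one line per OpenMP thread on the calling MPI rank, reporting the
// MPI rank/size, OpenMP thread number/count, host name and (where available)
// the core id; useful for checking process and thread placement on a cluster.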
void printParallelThreadCnt()
{
    int mpi_iam=0, mpi_num=1;
    char hname[64];

#ifdef HAVE_GETHOSTNAME
    gethostname(hname, 64);
    hname[63] = '\0';
#else
    strcpy(hname, "unknown host");
#endif

#ifdef ESYS_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif

#pragma omp parallel
    {
        int omp_iam=0, omp_num=1;
#ifdef _OPENMP
        omp_iam = omp_get_thread_num(); /* Call in a parallel region */
        omp_num = omp_get_num_threads();
#endif
#pragma omp critical (printthrdcount)
        printf("printParallelThreadCounts: MPI=%03d/%03d OpenMP=%03d/%03d running on %s core %d\n",
               mpi_iam, mpi_num, omp_iam, omp_num, hname, get_core_id());
    }
}

void setNumberOfThreads(const int num_threads)
{
#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#endif
}

int getNumberOfThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return 1;
#endif
}

ESCRIPT_DLL_API int getMPISizeWorld() {
    int mpi_num = 1;
#ifdef ESYS_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif
    return mpi_num;
}

ESCRIPT_DLL_API int getMPIRankWorld() {
    int mpi_iam = 0;
#ifdef ESYS_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
#endif
    return mpi_iam;
}

ESCRIPT_DLL_API int getMPIWorldMax(const int val) {
#ifdef ESYS_MPI
    int val2 = val;
    int out = val;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

ESCRIPT_DLL_API int getMPIWorldSum(const int val) {
#ifdef ESYS_MPI
    int val2 = val;
    int out = 0;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

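// Launches an external program described by the Python argument list 'args'
// (args[0] is the command). With MPI enabled this spawns one new process via
// MPI_Comm_spawn and returns the resulting error code; without MPI the
// arguments are joined into a single command line and passed to system().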
ESCRIPT_DLL_API int runMPIProgram(boost::python::list args) {
#ifdef ESYS_MPI
    MPI_Comm intercomm;
    MPI_Info info;
    int errors;
    int nargs = boost::python::extract<int>(args.attr("__len__")());
    std::string cmd = boost::python::extract<std::string>(args[0]);
    std::vector<std::string> cpp_args(nargs);
    char** c_args = new char*[nargs];
    char* c_cmd = const_cast<char*>(cmd.c_str());
    // skip command name in argument list
    for (int i=1; i<nargs; i++) {
        cpp_args[i-1]=boost::python::extract<std::string>(args[i]);
        c_args[i-1]=const_cast<char*>(cpp_args[i-1].c_str());
    }
    c_args[nargs-1]=NULL;
    MPI_Info_create(&info);
    MPI_Comm_spawn(c_cmd, c_args, 1, info, 0, MPI_COMM_WORLD, &intercomm, &errors);
    MPI_Info_free(&info);
    delete[] c_args;
    return errors;
#else
    std::string cmd;
    int nargs = boost::python::extract<int>(args.attr("__len__")());
    for (int i=0; i<nargs; i++) {
        cmd+=boost::python::extract<std::string>(args[i]);
        cmd+=" ";
    }
    return system(cmd.c_str());
#endif
}

ESCRIPT_DLL_API double getMachinePrecision() {
    return DBL_EPSILON;
}

ESCRIPT_DLL_API double getMaxFloat() {
    return DBL_MAX;
}

ESCRIPT_DLL_API void MPIBarrierWorld() {
#ifdef ESYS_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
}

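// Writes the Data objects in the Python dict 'arg' (keyed by column name) to
// 'filename' as CSV. An optional scalar entry under the key "mask" suppresses
// rows/points where the mask value is <= 0. All Data objects must live on the
// same domain; they are interpolated to a common FunctionSpace before output.
// The first line is a header built from the dict keys, with 'csep' separating
// a name from its component indices and 'sep' separating columns. Under MPI
// the ranks write their rows collectively via MPI_File_write_ordered.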
ESCRIPT_DLL_API
void
saveDataCSV(const std::string& filename, boost::python::dict arg, const std::string& sep, const std::string& csep,
            bool append)
{
    using std::cout;
    using std::endl;
    boost::python::list keys=arg.keys();
    int numdata = boost::python::extract<int>(arg.attr("__len__")());
    bool hasmask=arg.has_key("mask");
    Data mask;
    if (hasmask)
    {
        mask=boost::python::extract<escript::Data>(arg["mask"]);
        keys.remove("mask");
        numdata--;
        if (mask.getDataPointRank()!=0)
        {
            throw DataException("saveDataCSVcpp: masks must be scalar.");
        }
    }
    if (numdata<1)
    {
        throw DataException("saveDataCSVcpp: no data to save specified.");
    }
    std::vector<int> step(numdata);
    std::vector<std::string> names(numdata);
    std::vector<Data> data(numdata);
    std::vector<const DataAbstract::ValueType::value_type*> samples(numdata);
    std::vector<int> offset(numdata);
    std::vector<int> fstypes(numdata);    // FunctionSpace type for each Data object

    keys.sort();    // to get some predictable order to things

    // We need to interpret the samples correctly even if they are of different types,
    // so we iterate over samples rather than over the individual Data objects.
    for (int i=0;i<numdata;++i)
    {
        names[i]=boost::python::extract<std::string>(keys[i]);
        data[i]=boost::python::extract<escript::Data>(arg[keys[i]]);
        step[i]=(data[i].actsExpanded()?DataTypes::noValues(data[i].getDataPointShape()):0);
        fstypes[i]=data[i].getFunctionSpace().getTypeCode();
        if (i>0)
        {
            if (data[i].getDomain()!=data[i-1].getDomain())
            {
                throw DataException("saveDataCSVcpp: all data must be on the same domain.");
            }
        }
    }
    int bestfnspace=0;
    if (!data[0].getDomain()->commonFunctionSpace(fstypes, bestfnspace))
    {
        throw DataException("saveDataCSVcpp: FunctionSpaces of data are incompatible");
    }
    // now we interpolate all data to the same type
    FunctionSpace best(data[0].getDomain(),bestfnspace);
    for (int i=0;i<numdata;++i)
    {
        data[i]=data[i].interpolate(best);
    }
    int numsamples=data[0].getNumSamples();    // these must be the same for all data
    int dpps=data[0].getNumDataPointsPerSample();

    std::ostringstream os;

    bool first=true;

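    // Header row: one column label per scalar component of each Data object,
    // built from the (sorted) dict keys. For example, with sep="," and
    // csep="_", a scalar "p" and a rank-1 "v" with 3 components give the
    // header:  p,v_0,v_1,v_2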
    if (data[0].getDomain()->getMPIRank()==0)
    {
        for (int i=0;i<numdata;++i)
        {
            const DataTypes::ShapeType& s=data[i].getDataPointShape();
            switch (data[i].getDataPointRank())
            {
                case 0:
                    if (!first)
                    {
                        os << sep;
                    }
                    else
                    {
                        first=false;
                    }
                    os << names[i];
                    break;
                case 1:
                    for (int j=0;j<s[0];++j)
                    {
                        if (!first)
                        {
                            os << sep;
                        }
                        else
                        {
                            first=false;
                        }
                        os << names[i] << csep << j;
                    }
                    break;
                case 2:
                    for (int j=0;j<s[0];++j)
                    {
                        for (int k=0;k<s[1];++k)
                        {
                            if (!first)
                            {
                                os << sep;
                            }
                            else
                            {
                                first=false;
                            }
                            os << names[i] << csep << k << csep << j;
                        }
                    }
                    break;
                case 3:
                    for (int j=0;j<s[0];++j)
                    {
                        for (int k=0;k<s[1];++k)
                        {
                            for (int l=0;l<s[2];++l)
                            {
                                if (!first)
                                {
                                    os << sep;
                                }
                                else
                                {
                                    first=false;
                                }
                                os << names[i] << csep << k << csep << j << csep << l;
                            }
                        }
                    }
                    break;
                case 4:
                    for (int j=0;j<s[0];++j)
                    {
                        for (int k=0;k<s[1];++k)
                        {
                            for (int l=0;l<s[2];++l)
                            {
                                for (int m=0;m<s[3];++m)
                                {
                                    if (!first)
                                    {
                                        os << sep;
                                    }
                                    else
                                    {
                                        first=false;
                                    }
                                    os << names[i] << csep << k << csep << j << csep << l << csep << m;
                                }
                            }
                        }
                    }
                    break;
                default:
                    throw DataException("saveDataCSV: Illegal rank");
            }
        }
        os << endl;
    }

    const double* masksample=0;

    bool expandedmask=false;    // does the mask act expanded, i.e. is there a mask value for each point in the sample?
    bool wantrow=true;          // do we output this row?
    if (hasmask)
    {
        if (mask.actsExpanded())
        {
            expandedmask=true;
        }
    }
    os.setf(std::ios_base::scientific, std::ios_base::floatfield);
    os.precision(15);

    // errors prior to this point will occur on all processes anyway,
    // so there is no need to explicitly notify other ranks
    int error=0;
    try
    {
        for (int i=0;i<numsamples;++i)
        {
            if (!best.ownSample(i))
            {
                continue;
            }
            wantrow=true;
            for (int d=0;d<numdata;++d)
            {
                samples[d]=data[d].getSampleDataRO(i);
            }
            if (hasmask)
            {
                masksample=mask.getSampleDataRO(i);
                if (!expandedmask)    // mask controls whole sample
                {
                    if (masksample[0]<=0)    // masks are scalar
                    {
                        wantrow=false;
                    }
                }
            }
            for (int j=0;j<dpps;++j)
            {
                // now we need to check if this point is masked off
                if (expandedmask)
                {
                    wantrow=(masksample[j]>0);    // masks are scalar, so the relevant value is at [j]
                }
                if (wantrow)
                {
                    bool needsep=false;
                    for (int d=0;d<numdata;++d)
                    {
                        DataTypes::pointToStream(os, samples[d], data[d].getDataPointShape(), offset[d], needsep, sep);
                        needsep=true;
                        offset[d]+=step[d];
                    }
                    os << endl;
                }
            }
            for (int d=0;d<numdata;++d)
            {
                offset[d]=0;
            }
        }
    } catch (...)
    {
        error=1;
        if (data[0].getDomain()->getMPISize()==1) {
            throw;
        }
    }
#ifdef ESYS_MPI
    MPI_Comm com=data[0].getDomain()->getMPIComm();
    int rerror=0;
    MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
    error=rerror;
    if (error)
    {
        throw DataException("saveDataCSVcpp: error building output");
    }
#endif

    // at this point os will contain the text to be written
#ifndef ESYS_MPI
    (void) error;

    std::ofstream ofs;
    if (append)
    {
        ofs.open(filename.c_str(), std::ios_base::app);
    }
    else
    {
        ofs.open(filename.c_str());
    }
    if (!ofs.is_open())
    {
        throw DataException("saveDataCSVcpp: unable to open file for writing");
    }
    ofs << os.str();
    ofs.close();

#else
    // here we have MPI
    MPI_File mpi_fileHandle_p;
    MPI_Status mpi_status;
    MPI_Info mpi_info = MPI_INFO_NULL;
    boost::scoped_array<char> fname_p(new char[filename.size()+1]);
    strcpy(fname_p.get(), filename.c_str());

    int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
    if (append)
    {
        amode |= MPI_MODE_APPEND;
    }
    else
    {
        if (data[0].getDomain()->getMPIRank()==0)
        {
            std::ifstream ifs(fname_p.get());    // if file exists, remove it
            if (ifs.is_open())
            {
                ifs.close();
                if (remove(fname_p.get()))
                {
                    error=1;
                }
            }
        }
        data[0].getDomain()->MPIBarrier();
        int rerror=0;
        MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
        if (rerror!=0)
        {
            std::ostringstream oss;
            oss << "saveDataCSVcpp: File " << filename << " already exists and could not be removed in preparation for new output.";
            throw DataException(oss.str());
        }
    }
    int ierr;
    ierr = MPI_File_open(com, fname_p.get(), amode, mpi_info, &mpi_fileHandle_p);
    if (ierr != MPI_SUCCESS)
    {
        std::ostringstream oss;
        oss << "saveDataCSVcpp: File " << filename << " could not be opened for writing in parallel";
        // file is not open so we can throw
        throw DataException(oss.str());
    }
    else
    {
        ierr=MPI_File_set_view(mpi_fileHandle_p, MPI_DISPLACEMENT_CURRENT,
                MPI_CHAR, MPI_CHAR, const_cast<char*>("native"), mpi_info);
        // here we are assuming that std::string holds the same type of char as MPI_CHAR
    }

    std::string contents=os.str();
    boost::scoped_array<char> buff(new char[contents.size()+1]);
    strcpy(buff.get(), contents.c_str());
    ierr=MPI_File_write_ordered(mpi_fileHandle_p, buff.get(), contents.size(), MPI_CHAR, &mpi_status);
    if (ierr != MPI_SUCCESS)
    {
        error=1;
    }

    if (MPI_File_close(&mpi_fileHandle_p)!= MPI_SUCCESS)
    {
        error=1;
    }
    data[0].getDomain()->MPIBarrier();
    if (error)    // any errors at this stage are from collective routines
    {             // so there is no need to reduce_all
        throw DataException("saveDataCSVcpp: Error writing and closing file");
    }
#endif
}

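// Resolves a sequence of lazy Data objects as a group: collects the lazy
// members, resolves them together via resolveGroupWorker(), and then calls
// resolve() on each of those so they end up non-lazy. Non-Data items in the
// sequence raise a DataException.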
void
resolveGroup(boost::python::object obj)
{
    int len=0;
    try
    {
        len=boost::python::extract<int>(obj.attr("__len__")());
    }
    catch(...)
    {
        PyErr_Clear();    // tell python the error isn't there anymore
        throw DataException("Error - resolveGroup expects a sequence object.");
    }
    std::vector<DataLazy*> dats;
    std::vector<Data*> dp;
    for (int i=0;i<len;++i)
    {
        Data* p=0;
        try
        {
            p=boost::python::extract<Data*>(obj[i]);
        }
        catch(...)
        {
            PyErr_Clear();
            throw DataException("Error - resolveGroup only accepts Data objects.");
        }
        if (p->isLazy())
        {
            dats.push_back(dynamic_cast<DataLazy*>(p->borrowData()));
            dp.push_back(p);
        }
    }
    if (!dats.empty())
    {
        dats[0]->resolveGroupWorker(dats);
    }
    // all the data will be identities now but it's still lazy,
    // so convert it to ready
    for (int i=dp.size()-1;i>=0;--i)
    {
        dp[i]->resolve();
    }
}


} // end of namespace
