
Contents of /branches/stage3.1/escript/src/Utils.cpp



Revision 2945
Wed Feb 24 00:17:46 2010 UTC by jfenwick
File size: 12332 byte(s)
Bringing release stage up to trunk version 2944


/*******************************************************
*
* Copyright (c) 2003-2010 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/


#include <fstream>
#include <sstream>   // std::ostringstream used by saveDataCSV
#include <cfloat>    // DBL_EPSILON and DBL_MAX
#include <cstdio>    // printf/fopen/sprintf
#include <string.h>

// added for saveCSV
#include <boost/python.hpp>
#include <boost/scoped_ptr.hpp>
#include "Data.h"

#include "Utils.h"
#include "DataVector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef PASO_MPI
#include <mpi.h>
#endif

#ifdef _WIN32
#include <WinSock2.h>
#else
#include <unistd.h>
#endif

namespace escript {

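// Returns the subversion revision number baked in at compile time via
// SVN_VERSION, or 0 if it was not defined.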
int getSvnVersion()
{
#ifdef SVN_VERSION
    return SVN_VERSION;
#else
    return 0;
#endif
}

/* This is probably not very robust, but it works on Savanna today and is useful for performance analysis */
int get_core_id() {
    int processor_num=-1;
#ifdef CORE_ID1
    FILE *fp;
    int i, count_spaces=0;
    char fname[100];
    char buf[1000];

    sprintf(fname, "/proc/%d/stat", getpid());
    fp = fopen(fname, "r");
    if (fp == NULL) return(-1);
    fgets(buf, 1000, fp);
    fclose(fp);

    for (i=strlen(buf)-1; i>=0; i--) {
        if (buf[i] == ' ') count_spaces++;
        if (count_spaces == 4) break;
    }
    processor_num = atoi(&buf[i+1]);
#endif
    return(processor_num);
}


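// Prints one line per OpenMP thread on every MPI rank, showing the MPI
// rank/size, the OpenMP thread number/count, the host name and (where
// available) the core the thread is running on.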
void printParallelThreadCnt()
{
    int mpi_iam=0, mpi_num=1;
    char hname[64];

#ifdef HAVE_GETHOSTNAME
    gethostname(hname, 64);
    hname[63] = '\0';
#else
    strcpy(hname, "unknown host");
#endif

#ifdef PASO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif

    #pragma omp parallel
    {
        int omp_iam=0, omp_num=1;
#ifdef _OPENMP
        omp_iam = omp_get_thread_num(); /* Call in a parallel region */
        omp_num = omp_get_num_threads();
#endif
        #pragma omp critical (printthrdcount)
        printf("printParallelThreadCounts: MPI=%03d/%03d OpenMP=%03d/%03d running on %s core %d\n",
               mpi_iam, mpi_num, omp_iam, omp_num, hname, get_core_id());
    }
}

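// Sets the number of OpenMP threads to use; a no-op in builds without OpenMP.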
void setNumberOfThreads(const int num_threads)
{

#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#endif

}

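// Returns the maximum number of OpenMP threads, or 1 in builds without OpenMP.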
int getNumberOfThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return 1;
#endif

}

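// Returns the number of MPI processes in MPI_COMM_WORLD (1 without MPI).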
ESCRIPT_DLL_API int getMPISizeWorld() {
    int mpi_num = 1;
#ifdef PASO_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif
    return mpi_num;
}

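// Returns the rank of this process in MPI_COMM_WORLD (0 without MPI).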
ESCRIPT_DLL_API int getMPIRankWorld() {
    int mpi_iam = 0;
#ifdef PASO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
#endif
    return mpi_iam;
}

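// Returns the maximum of val over all processes in MPI_COMM_WORLD
// (val itself without MPI).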
ESCRIPT_DLL_API int getMPIWorldMax(const int val) {
#ifdef PASO_MPI
    int val2 = val;
    int out = val;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

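// Returns the sum of val over all processes in MPI_COMM_WORLD
// (val itself without MPI).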
ESCRIPT_DLL_API int getMPIWorldSum(const int val) {
#ifdef PASO_MPI
    int val2 = val;
    int out = 0;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

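// Machine epsilon, largest finite double, and a barrier over MPI_COMM_WORLD
// (a no-op without MPI).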
ESCRIPT_DLL_API double getMachinePrecision() {
    return DBL_EPSILON;
}
ESCRIPT_DLL_API double getMaxFloat() {
    return DBL_MAX;
}
ESCRIPT_DLL_API void MPIBarrierWorld() {
#ifdef PASO_MPI
    MPI_Barrier(MPI_COMM_WORLD );
#endif
}


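// Writes the Data objects in the dict 'arg' to 'filename' as CSV text.
// Each key becomes a column name (non-scalar components get csep-separated
// index suffixes) and columns are separated by 'sep'. An optional scalar
// entry under the key "mask" suppresses rows (or individual points, if the
// mask is expanded) where the mask value is <= 0. All Data must share a
// domain; they are interpolated to a common FunctionSpace before output.
// Without MPI a single std::ofstream writes the file; with MPI each rank
// writes the samples it owns via MPI_File_write_ordered. If 'append' is
// true, output is appended to an existing file.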
ESCRIPT_DLL_API
void
saveDataCSV(const std::string& filename, boost::python::dict arg, const std::string& sep, const std::string& csep,
            bool append)
{
    using std::cout;
    using std::endl;
    boost::python::list keys=arg.keys();
    int numdata = boost::python::extract<int>(arg.attr("__len__")());
    bool hasmask=arg.has_key("mask");
    Data mask;
    if (hasmask)
    {
        mask=boost::python::extract<escript::Data>(arg["mask"]);
        keys.remove("mask");
        numdata--;
        if (mask.getDataPointRank()!=0)
        {
            throw DataException("saveDataCSVcpp: masks must be scalar.");
        }
    }
    if (numdata<1)
    {
        throw DataException("saveDataCSVcpp: no data to save specified.");
    }
    std::vector<int> step(numdata);
    std::vector<std::string> names(numdata);
    std::vector<Data> data(numdata);
    std::vector<const DataAbstract::ValueType::value_type*> samples(numdata);
    std::vector<int> offset(numdata);
    std::vector<int> fstypes(numdata); // FunctionSpace types for each data

    keys.sort(); // to get some predictable order to things

    // We need to interpret the samples correctly even if they are different types
    // for this reason, we should iterate over samples
    for (int i=0;i<numdata;++i)
    {
        names[i]=boost::python::extract<std::string>(keys[i]);
        data[i]=boost::python::extract<escript::Data>(arg[keys[i]]);
        step[i]=(data[i].actsExpanded()?DataTypes::noValues(data[i].getDataPointShape()):0);
        fstypes[i]=data[i].getFunctionSpace().getTypeCode();
        if (i>0)
        {
            if (data[i].getDomain()!=data[i-1].getDomain())
            {
                throw DataException("saveDataCSVcpp: all data must be on the same domain.");
            }
        }
    }
    int bestfnspace=0;
    if (!data[0].getDomain()->commonFunctionSpace(fstypes, bestfnspace))
    {
        throw DataException("saveDataCSVcpp: FunctionSpaces of data are incompatible");
    }
    // now we interpolate all data to the same type
    FunctionSpace best(data[0].getDomain(),bestfnspace);
    for (int i=0;i<numdata;++i)
    {
        data[i]=data[i].interpolate(best);
    }
    int numsamples=data[0].getNumSamples(); // these must be the same for all data
    int dpps=data[0].getNumDataPointsPerSample();


    std::ostringstream os;


    bool first=true;

    if (data[0].getDomain()->getMPIRank()==0)
    {
        for (int i=0;i<numdata;++i)
        {
            const DataTypes::ShapeType& s=data[i].getDataPointShape();
            switch (data[i].getDataPointRank())
            {
                case 0: if (!first)
                        {
                            os << sep;
                        }
                        else
                        {
                            first=false;
                        }
                        os << names[i]; break;
                case 1: for (int j=0;j<s[0];++j)
                        {
                            if (!first)
                            {
                                os << sep;
                            }
                            else
                            {
                                first=false;
                            }
                            os << names[i] << csep << j;
                        }
                        break;
                case 2: for (int j=0;j<s[0];++j)
                        {
                            for (int k=0;k<s[1];++k)
                            {
                                if (!first)
                                {
                                    os << sep;
                                }
                                else
                                {
                                    first=false;
                                }
                                os << names[i] << csep << k << csep << j;
                            }
                        }
                        break;
                case 3: for (int j=0;j<s[0];++j)
                        {
                            for (int k=0;k<s[1];++k)
                            {
                                for (int l=0;l<s[2];++l)
                                {
                                    if (!first)
                                    {
                                        os << sep;
                                    }
                                    else
                                    {
                                        first=false;
                                    }
                                    os << names[i] << csep << k << csep << j << csep << l;
                                }
                            }
                        }
                        break;
                case 4: for (int j=0;j<s[0];++j)
                        {
                            for (int k=0;k<s[1];++k)
                            {
                                for (int l=0;l<s[2];++l)
                                {
                                    for (int m=0;m<s[3];++m)
                                    {
                                        if (!first)
                                        {
                                            os << sep;
                                        }
                                        else
                                        {
                                            first=false;
                                        }
                                        os << names[i] << csep << k << csep << j << csep << l << csep << m;
                                    }
                                }
                            }
                        }
                        break;
                default:
                    throw DataException("saveDataCSV: Illegal rank");
            }
        }
        os << endl;
    }

    const double* masksample=0;
    int maskoffset=0;


    bool expandedmask=false; // does the mask act expanded, i.e. is there a mask value for each point in the sample?
    bool wantrow=true; // do we output this row?
    if (hasmask)
    {
        if (mask.actsExpanded())
        {
            maskoffset=DataTypes::noValues(mask.getDataPointShape());
            expandedmask=true;
        }
    }
    os.setf(std::ios_base::scientific, std::ios_base::floatfield);
    os.precision(15);

    // errors prior to this point will occur on all processes anyway
    // so there is no need to explicitly notify other ranks
    int error=0;
    try{
        for (int i=0;i<numsamples;++i)
        {
            if (!best.ownSample(i))
            {
                continue;
            }
            wantrow=true;
            for (int d=0;d<numdata;++d)
            {
                samples[d]=data[d].getSampleDataRO(i);
            }
            if (hasmask)
            {
                masksample=mask.getSampleDataRO(i);
                if (!expandedmask) // mask controls whole sample
                {
                    if (masksample[0]<=0) // masks are scalar
                    {
                        wantrow=false;
                    }
                }
            }
            for (int j=0;j<dpps;++j)
            {
                // now we need to check if this point is masked off
                if (expandedmask)
                {
                    wantrow=(masksample[j]>0); // masks are scalar so the relevant value is at [j]
                }
                if (wantrow)
                {
                    bool needsep=false;
                    for (int d=0;d<numdata;++d)
                    {
                        DataTypes::pointToStream(os, samples[d], data[d].getDataPointShape(), offset[d], needsep, sep);
                        needsep=true;
                        offset[d]+=step[d];
                    }
                    os << endl;
                }
            }
            for (int d=0;d<numdata;++d)
            {
                offset[d]=0;
            }
        }
    } catch (...)
    {
        error=1;
#ifndef PASO_MPI
        throw;
#endif
    }
#ifdef PASO_MPI
    MPI_Comm com=data[0].getDomain()->getMPIComm();
    int rerror=0;
    MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
    error=rerror;
    if (error)
    {
        throw DataException("saveDataCSVcpp: error building output");
    }
#endif

    // at this point os will contain the text to be written
#ifndef PASO_MPI

    std::ofstream ofs;
    if (append)
    {
        ofs.open(filename.c_str(), std::ios_base::app);
    }
    else
    {
        ofs.open(filename.c_str());
    }
    if (!ofs.is_open())
    {
        throw DataException("saveDataCSVcpp: unable to open file for writing");
    }
    ofs << os.str();
    ofs.close();

#else
    // here we have MPI
    MPI_File mpi_fileHandle_p;
    MPI_Status mpi_status;
    MPI_Info mpi_info = MPI_INFO_NULL;
    char* fname_c=new char[filename.size()+1];
    strcpy(fname_c,filename.c_str());
    boost::scoped_ptr<char> fname_p(fname_c);

    int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
    if (append)
    {
        amode |= MPI_MODE_APPEND;
    }
    else
    {
        if (data[0].getDomain()->getMPIRank()==0)
        {
            std::ifstream ifs(fname_p.get()); // if file exists, remove it
            if (ifs.is_open())
            {
                ifs.close();
                if (remove(fname_p.get()))
                {
                    error=1;
                }
            }
        }
        data[0].getDomain()->MPIBarrier();
        int rerror=0;
        MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
        if (rerror!=0)
        {
            std::ostringstream oss;
            oss << "saveDataCSVcpp: File " << filename << " already exists and could not be removed in preparation for new output.";
            throw DataException(oss.str());
        }
    }
    int ierr;
    ierr = MPI_File_open(com, fname_p.get(), amode, mpi_info, &mpi_fileHandle_p);
    if (ierr != MPI_SUCCESS)
    {
        std::ostringstream oss;
        oss << "saveDataCSVcpp: File " << filename << " could not be opened for writing in parallel";
        // file is not open so we can throw
        throw DataException(oss.str());
    }
    else
    {
        ierr=MPI_File_set_view(mpi_fileHandle_p,MPI_DISPLACEMENT_CURRENT,
                MPI_CHAR, MPI_CHAR, const_cast<char*>("native"), mpi_info);
        // here we are assuming that std::string holds the same type of char as MPI_CHAR
    }

    std::string contents=os.str();
    char* con=new char[contents.size()+1];
    strcpy(con, contents.c_str());
    boost::scoped_ptr<char> buff(con);
    ierr=MPI_File_write_ordered(mpi_fileHandle_p, buff.get(), contents.size(), MPI_CHAR, &mpi_status);
    if (ierr != MPI_SUCCESS)
    {
        error=1;
    }

    if (MPI_File_close(&mpi_fileHandle_p)!= MPI_SUCCESS)
    {
        error=1;
    }
    data[0].getDomain()->MPIBarrier();
    if (error) // any errors at this stage are from collective routines
    {          // so there is no need to reduce_all
        throw DataException("saveDataCSVcpp: Error writing and closing file");
    }

#endif
}


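// Takes a python sequence of Data objects and resolves any lazy members as
// a group (via DataLazy::resolveGroupWorker) before converting each of them
// to ready form. Throws DataException if the argument is not a sequence or
// contains anything other than Data objects.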
void
resolveGroup(boost::python::object obj)
{
    int len=0;
    try
    {
        len=boost::python::extract<int>(obj.attr("__len__")());
    }
    catch(...)
    {
        PyErr_Clear(); // tell python the error isn't there anymore
        throw DataException("Error - resolveGroup expects a sequence object.");
    }
    std::vector<DataLazy*> dats;
    std::vector<Data*> dp;
    for (int i=0;i<len;++i)
    {
        Data* p=0;
        try
        {
            p=boost::python::extract<Data*>(obj[i]);
        }
        catch(...)
        {
            PyErr_Clear();
            throw DataException("Error - resolveGroup only accepts Data objects.");
        }
        if (p->isLazy())
        {
            dats.push_back(dynamic_cast<DataLazy*>(p->borrowData()));
            dp.push_back(p);
        }
    }
    if (!dats.empty())
    {
        dats[0]->resolveGroupWorker(dats);
    }
    // all the data will be identities now but it's still lazy
    // convert it to ready
    for (int i=dp.size()-1;i>=0;--i)
    {
        dp[i]->resolve();
    }
}


} // end of namespace
