
Contents of /trunk/escript/src/Utils.cpp



Revision 2641
Mon Aug 31 07:41:49 2009 UTC by jfenwick
File size: 11801 bytes
Fixed some of my stupids related to MPI compile errors.

/*******************************************************
*
* Copyright (c) 2003-2009 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/

#include <fstream>
#include <string.h>
#include <cfloat>   // for DBL_EPSILON and DBL_MAX
#include <cstdio>   // for FILE, fopen, printf and remove
#include <cstdlib>  // for atoi

// added for saveCSV
#include <boost/python.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/scoped_array.hpp>
#include "Data.h"

#include "Utils.h"
#include "DataVector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef PASO_MPI
#include <mpi.h>
#endif

#ifdef _WIN32
#include <WinSock2.h>
#else
#include <unistd.h>
#endif

namespace escript {

int getSvnVersion()
{
#ifdef SVN_VERSION
    return SVN_VERSION;
#else
    return 0;
#endif
}

/* This is probably not very robust, but it works on Savanna today and is useful for performance analysis */
int get_core_id() {
    int processor_num=-1;
#ifdef CORE_ID1
    FILE *fp;
    int i, count_spaces=0;
    char fname[100];
    char buf[1000];

    sprintf(fname, "/proc/%d/stat", getpid());
    fp = fopen(fname, "r");
    if (fp == NULL) return(-1);
    if (fgets(buf, 1000, fp) == NULL) {  // stat line could not be read
        fclose(fp);
        return(-1);
    }
    fclose(fp);

    // locate the processor field by counting four spaces back from the end of the line
    for (i=strlen(buf)-1; i>=0; i--) {
        if (buf[i] == ' ') count_spaces++;
        if (count_spaces == 4) break;
    }
    processor_num = atoi(&buf[i+1]);
#endif
    return(processor_num);
}


void printParallelThreadCnt()
{
    int mpi_iam=0, mpi_num=1;
    char hname[64];

#ifdef HAVE_GETHOSTNAME
    gethostname(hname, 64);
    hname[63] = '\0';
#else
    strcpy(hname, "unknown host");
#endif

#ifdef PASO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif

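    // Each thread queries its own OpenMP id inside the parallel region;
    // the named critical section keeps the output lines from interleaving.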
#pragma omp parallel
    {
        int omp_iam=0, omp_num=1;
#ifdef _OPENMP
        omp_iam = omp_get_thread_num(); /* Call in a parallel region */
        omp_num = omp_get_num_threads();
#endif
#pragma omp critical (printthrdcount)
        printf("printParallelThreadCounts: MPI=%03d/%03d OpenMP=%03d/%03d running on %s core %d\n",
               mpi_iam, mpi_num, omp_iam, omp_num, hname, get_core_id());
    }
}

void setNumberOfThreads(const int num_threads)
{
#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#endif
}

int getNumberOfThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return 1;
#endif
}

ESCRIPT_DLL_API int getMPISizeWorld() {
    int mpi_num = 1;
#ifdef PASO_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif
    return mpi_num;
}

ESCRIPT_DLL_API int getMPIRankWorld() {
    int mpi_iam = 0;
#ifdef PASO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
#endif
    return mpi_iam;
}

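// Collective reductions over MPI_COMM_WORLD; in a non-MPI build the
// input value is simply passed through.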
ESCRIPT_DLL_API int getMPIWorldMax(const int val) {
#ifdef PASO_MPI
    int val2 = val;
    int out = val;
    MPI_Allreduce(&val2, &out, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
#else
    int out = val;
#endif
    return out;
}

ESCRIPT_DLL_API int getMPIWorldSum(const int val) {
#ifdef PASO_MPI
    int val2 = val;
    int out = 0;
    MPI_Allreduce(&val2, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
#else
    int out = val;
#endif
    return out;
}

ESCRIPT_DLL_API double getMachinePrecision() {
    return DBL_EPSILON;
}

ESCRIPT_DLL_API double getMaxFloat() {
    return DBL_MAX;
}

ESCRIPT_DLL_API void MPIBarrierWorld() {
#ifdef PASO_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
}

ESCRIPT_DLL_API
void
saveDataCSV(const std::string& filename, boost::python::dict arg, const std::string& sep, const std::string& csep,
            bool append)
{
    using std::endl;
    boost::python::list keys=arg.keys();
    int numdata = boost::python::extract<int>(arg.attr("__len__")());
    bool hasmask=arg.has_key("mask");
    Data mask;
    if (hasmask)
    {
        mask=boost::python::extract<escript::Data>(arg["mask"]);
        keys.remove("mask");
        numdata--;
        if (mask.getDataPointRank()!=0)
        {
            throw DataException("saveDataCSVcpp: masks must be scalar.");
        }
    }
    if (numdata<1)
    {
        throw DataException("saveDataCSVcpp: no data to save specified.");
    }
    std::vector<int> step(numdata);
    std::vector<std::string> names(numdata);
    std::vector<Data> data(numdata);
    std::vector<const DataAbstract::ValueType::value_type*> samples(numdata);
    std::vector<int> offset(numdata);
    std::vector<int> fstypes(numdata); // FunctionSpace type for each Data object
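    // step[i] is the per-point stride within a sample: expanded data
    // advances by the number of values per data point, while non-expanded
    // data keeps offset 0 so the sample's single stored point is reused.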

    // We need to interpret the samples correctly even if they are different types,
    // so we iterate over samples.
    for (int i=0;i<numdata;++i)
    {
        names[i]=boost::python::extract<std::string>(keys[i]);
        data[i]=boost::python::extract<escript::Data>(arg[keys[i]]);
        step[i]=(data[i].actsExpanded()?DataTypes::noValues(data[i].getDataPointShape()):0);
        fstypes[i]=data[i].getFunctionSpace().getTypeCode();
        if (i>0)
        {
            if (data[i].getDomain()!=data[i-1].getDomain())
            {
                throw DataException("saveDataCSVcpp: all data must be on the same domain.");
            }
        }
    }
    int bestfnspace=0;
    if (!data[0].getDomain()->commonFunctionSpace(fstypes, bestfnspace))
    {
        throw DataException("saveDataCSVcpp: FunctionSpaces of data are incompatible.");
    }
    // now we interpolate all data to the same FunctionSpace
    FunctionSpace best(data[0].getDomain(),bestfnspace);
    for (int i=0;i<numdata;++i)
    {
        data[i]=data[i].interpolate(best);
    }
    int numsamples=data[0].getNumSamples(); // these must be the same for all data
    int dpps=data[0].getNumDataPointsPerSample();

    std::ostringstream os;
    bool first=true;

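    // Rank 0 builds the header row: a scalar contributes a single column
    // "name"; higher-rank data contributes one column per component, with
    // the component indices appended via csep.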
    if (data[0].getDomain()->getMPIRank()==0)
    {
        for (int i=0;i<numdata;++i)
        {
            const DataTypes::ShapeType& s=data[i].getDataPointShape();
            switch (data[i].getDataPointRank())
            {
            case 0:
                if (!first)
                {
                    os << sep;
                }
                else
                {
                    first=false;
                }
                os << names[i];
                break;
            case 1:
                for (int j=0;j<s[0];++j)
                {
                    if (!first)
                    {
                        os << sep;
                    }
                    else
                    {
                        first=false;
                    }
                    os << names[i] << csep << j;
                }
                break;
            case 2:
                for (int j=0;j<s[0];++j)
                {
                    for (int k=0;k<s[1];++k)
                    {
                        if (!first)
                        {
                            os << sep;
                        }
                        else
                        {
                            first=false;
                        }
                        os << names[i] << csep << k << csep << j;
                    }
                }
                break;
            case 3:
                for (int j=0;j<s[0];++j)
                {
                    for (int k=0;k<s[1];++k)
                    {
                        for (int l=0;l<s[2];++l)
                        {
                            if (!first)
                            {
                                os << sep;
                            }
                            else
                            {
                                first=false;
                            }
                            os << names[i] << csep << k << csep << j << csep << l;
                        }
                    }
                }
                break;
            case 4:
                for (int j=0;j<s[0];++j)
                {
                    for (int k=0;k<s[1];++k)
                    {
                        for (int l=0;l<s[2];++l)
                        {
                            for (int m=0;m<s[3];++m)
                            {
                                if (!first)
                                {
                                    os << sep;
                                }
                                else
                                {
                                    first=false;
                                }
                                os << names[i] << csep << k << csep << j << csep << l << csep << m;
                            }
                        }
                    }
                }
                break;
            default:
                throw DataException("saveDataCSV: Illegal rank");
            }
        }
        os << endl;
    }
    boost::scoped_ptr<BufferGroup> maskbuffer; // sample buffer for the mask [if we have one]
    const double* masksample=0;
    int maskoffset=0;
    // the use of shared_ptr here is just to ensure the buffer groups are freed;
    // I would have used scoped_ptr but they don't work in vectors
    std::vector<boost::shared_ptr<BufferGroup> > bg(numdata);
    for (int d=0;d<numdata;++d)
    {
        bg[d].reset(data[d].allocSampleBuffer());
    }

    bool expandedmask=false; // does the mask act expanded, i.e. is there a mask value for each point in the sample?
    bool wantrow=true;       // do we output this row?
    if (hasmask)
    {
        maskbuffer.reset(mask.allocSampleBuffer());
        if (mask.actsExpanded())
        {
            maskoffset=DataTypes::noValues(mask.getDataPointShape());
            expandedmask=true;
        }
    }
    os.setf(std::ios_base::scientific, std::ios_base::floatfield);
    os.precision(15);

    // errors prior to this point will occur on all processes anyway,
    // so there is no need to explicitly notify other ranks
    int error=0;
    try
    {
        for (int i=0;i<numsamples;++i)
        {
            wantrow=true;
            for (int d=0;d<numdata;++d)
            {
                samples[d]=data[d].getSampleDataRO(i,bg[d].get());
            }
            if (hasmask)
            {
                masksample=mask.getSampleDataRO(i, maskbuffer.get());
                if (!expandedmask) // mask controls whole sample
                {
                    if (masksample[0]<=0) // masks are scalar
                    {
                        wantrow=false;
                    }
                }
            }
            for (int j=0;j<dpps;++j)
            {
                // now we need to check if this point is masked off
                if (expandedmask)
                {
                    wantrow=(masksample[j]>0); // masks are scalar, so the relevant value is at [j]
                }
                if (wantrow)
                {
                    bool needsep=false;
                    for (int d=0;d<numdata;++d)
                    {
                        DataTypes::pointToStream(os, samples[d], data[d].getDataPointShape(), offset[d], needsep, sep);
                        needsep=true;
                        offset[d]+=step[d];
                    }
                    os << endl;
                }
            }
            for (int d=0;d<numdata;++d)
            {
                offset[d]=0;
            }
        }
    }
    catch (...)
    {
        error=1;
#ifndef PASO_MPI
        throw;
#endif
    }
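    // Under MPI the exception is deferred: every rank must reach the
    // Allreduce below, otherwise ranks that did not fail would hang in the
    // collective file operations.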
#ifdef PASO_MPI
    MPI_Comm com=data[0].getDomain()->getMPIComm();
    int rerror=0;
    MPI_Allreduce(&error, &rerror, 1, MPI_INT, MPI_MAX, com);
    error=rerror;
    if (error)
    {
        throw DataException("saveDataCSVcpp: error building output");
    }
#endif

    // at this point os will contain the text to be written
#ifndef PASO_MPI

    std::ofstream ofs;
    if (append)
    {
        ofs.open(filename.c_str(), std::ios_base::app);
    }
    else
    {
        ofs.open(filename.c_str());
    }
    if (!ofs.is_open())
    {
        throw DataException("saveDataCSVcpp: unable to open file for writing");
    }
    ofs << os.str();
    ofs.close();

#else
    // here we have MPI
    MPI_File mpi_fileHandle_p;
    MPI_Status mpi_status;
    MPI_Info mpi_info = MPI_INFO_NULL;
    char* fname_c=new char[filename.size()+1];
    strcpy(fname_c,filename.c_str());
    boost::scoped_array<char> fname_p(fname_c); // scoped_array calls delete[]

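    // Open collectively for create+write; MPI_MODE_UNIQUE_OPEN asserts the
    // file will not be opened concurrently elsewhere.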
    int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
    if (append)
    {
        amode |= MPI_MODE_APPEND;
    }
    else
    {
        if (data[0].getDomain()->getMPIRank()==0)
        {
            std::ifstream ifs(fname_p.get()); // if the file exists, remove it
            if (ifs.is_open())
            {
                ifs.close();
                if (remove(fname_p.get()) != 0) // remove() returns 0 on success
                {
                    error=1;
                }
            }
        }
        data[0].getDomain()->MPIBarrier();
        int rerror=0;
        MPI_Allreduce(&error, &rerror, 1, MPI_INT, MPI_MAX, com);
        if (rerror!=0)
        {
            std::ostringstream oss;
            oss << "saveDataCSVcpp: File " << filename << " already exists and could not be removed in preparation for new output.";
            throw DataException(oss.str());
        }
    }
    int ierr;
    ierr = MPI_File_open(com, fname_p.get(), amode, mpi_info, &mpi_fileHandle_p);
    if (ierr != MPI_SUCCESS)
    {
        std::ostringstream oss;
        oss << "saveDataCSVcpp: File " << filename << " could not be opened for writing in parallel";
        // file is not open so we can throw
        throw DataException(oss.str());
    }
    else
    {
        ierr=MPI_File_set_view(mpi_fileHandle_p, MPI_DISPLACEMENT_CURRENT,
                               MPI_CHAR, MPI_CHAR, "native", mpi_info);
        // here we are assuming that std::string holds the same type of char as MPI_CHAR
    }
    std::string contents=os.str();
    char* con=new char[contents.size()+1];
    strcpy(con, contents.c_str());
    boost::scoped_array<char> buff(con); // scoped_array calls delete[]
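    // MPI_File_write_ordered is collective and writes each rank's buffer in
    // rank order, so rows from rank 0 precede rows from rank 1, and so on.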
    ierr=MPI_File_write_ordered(mpi_fileHandle_p, buff.get(), contents.size(), MPI_CHAR, &mpi_status);
    if (ierr != MPI_SUCCESS)
    {
        error=1;
    }

    if (MPI_File_close(&mpi_fileHandle_p) != MPI_SUCCESS)
    {
        error=1;
    }
    data[0].getDomain()->MPIBarrier();
    if (error) // any errors at this stage are from collective routines,
    {          // so there is no need to reduce_all
        throw DataException("saveDataCSVcpp: Error writing and closing file");
    }

#endif
}

} // end of namespace
