Contents of /trunk/escript/src/Utils.cpp

Revision 2644 - Wed Sep 2 04:14:03 2009 UTC by jfenwick
File size: 11914 bytes
Add unit tests for saveDataCSV, which should be ready for use now.
Keyword args are now output in sorted order.

Removed some unneeded functions.
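
Since saveDataCSV (defined near the end of this file) is normally reached from Python, a short sketch of how it can be exercised directly may help. Everything in the sketch that is not declared in this file (the writeCsv wrapper and the Data objects u, v, m) is invented for illustration, and it presupposes a running Python interpreter:

// Hypothetical usage sketch -- not part of Utils.cpp. Assumes boost::python is
// initialised and u, v, m are existing escript::Data objects (m scalar).
#include <boost/python.hpp>
#include "Data.h"
#include "Utils.h"

void writeCsv(const escript::Data& u, const escript::Data& v, const escript::Data& m)
{
    boost::python::dict args;
    args["u"] = u;     // every key except "mask" becomes a column group, in sorted key order
    args["v"] = v;
    args["mask"] = m;  // optional scalar mask: rows with mask <= 0 are suppressed
    // sep separates fields, csep joins a name to its component indices
    escript::saveDataCSV("out.csv", args, ",", "_", false);
}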

/*******************************************************
*
* Copyright (c) 2003-2009 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/

#include <fstream>
#include <sstream>
#include <string.h>
#include <cstdio>    // sprintf, fopen, printf
#include <cstdlib>   // atoi
#include <cfloat>    // DBL_EPSILON, DBL_MAX

// added for saveCSV
#include <boost/python.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/scoped_array.hpp>
#include "Data.h"

#include "Utils.h"
#include "DataVector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef PASO_MPI
#include <mpi.h>
#endif

#ifdef _WIN32
#include <WinSock2.h>
#else
#include <unistd.h>
#endif

namespace escript {

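// Revision number of the escript source, baked in at compile time through the
// SVN_VERSION define; returns 0 for builds made outside version control.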
int getSvnVersion()
{
#ifdef SVN_VERSION
    return SVN_VERSION;
#else
    return 0;
#endif
}

/* This is probably not very robust, but it works on Savanna today and is useful for performance analysis */
int get_core_id() {
    int processor_num=-1;
#ifdef CORE_ID1
    FILE *fp;
    int i, count_spaces=0;
    char fname[100];
    char buf[1000];

    sprintf(fname, "/proc/%d/stat", getpid());
    fp = fopen(fname, "r");
    if (fp == NULL) return(-1);
    fgets(buf, 1000, fp);
    fclose(fp);

    for (i=strlen(buf)-1; i>=0; i--) {
        if (buf[i] == ' ') count_spaces++;
        if (count_spaces == 4) break;
    }
    processor_num = atoi(&buf[i+1]);
#endif
    return(processor_num);
}
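// Print one identification line per OpenMP thread on every MPI rank:
// MPI rank/size, OpenMP thread/count, host name and (if determinable) core id.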
void printParallelThreadCnt()
{
    int mpi_iam=0, mpi_num=1;
    char hname[64];

#ifdef HAVE_GETHOSTNAME
    gethostname(hname, 64);
    hname[63] = '\0';
#else
    strcpy(hname, "unknown host");
#endif

#ifdef PASO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif

#pragma omp parallel
    {
        int omp_iam=0, omp_num=1;
#ifdef _OPENMP
        omp_iam = omp_get_thread_num(); /* call in a parallel region */
        omp_num = omp_get_num_threads();
#endif
#pragma omp critical (printthrdcount)
        printf("printParallelThreadCounts: MPI=%03d/%03d OpenMP=%03d/%03d running on %s core %d\n",
               mpi_iam, mpi_num, omp_iam, omp_num, hname, get_core_id());
    }
}
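// Set and query the OpenMP thread count; without OpenMP the setter is a no-op
// and the getter reports a single thread.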
void setNumberOfThreads(const int num_threads)
{
#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#endif
}

int getNumberOfThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return 1;
#endif
}
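// Thin wrappers around MPI_COMM_WORLD queries and reductions; each falls back
// to the obvious serial value when escript is built without PASO_MPI.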
ESCRIPT_DLL_API int getMPISizeWorld() {
    int mpi_num = 1;
#ifdef PASO_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif
    return mpi_num;
}

ESCRIPT_DLL_API int getMPIRankWorld() {
    int mpi_iam = 0;
#ifdef PASO_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
#endif
    return mpi_iam;
}

ESCRIPT_DLL_API int getMPIWorldMax(const int val) {
#ifdef PASO_MPI
    int val2 = val;
    int out = val;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

ESCRIPT_DLL_API int getMPIWorldSum(const int val) {
#ifdef PASO_MPI
    int val2 = val;
    int out = 0;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

ESCRIPT_DLL_API double getMachinePrecision() {
    return DBL_EPSILON;
}

ESCRIPT_DLL_API double getMaxFloat() {
    return DBL_MAX;
}

ESCRIPT_DLL_API void MPIBarrierWorld() {
#ifdef PASO_MPI
    MPI_Barrier(MPI_COMM_WORLD );
#endif
}
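// Write the Data objects in 'arg' to 'filename' in CSV form.
// Each dict key names a column group; the optional key "mask" must hold scalar
// Data and suppresses rows where the mask value is <= 0. 'sep' separates
// fields, 'csep' joins a name to its component indices, and 'append' appends
// to the file instead of overwriting it.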
ESCRIPT_DLL_API
void
saveDataCSV(const std::string& filename, boost::python::dict arg, const std::string& sep, const std::string& csep,
            bool append)
{
    using std::endl;
    boost::python::list keys=arg.keys();
    int numdata = boost::python::extract<int>(arg.attr("__len__")());
    bool hasmask=arg.has_key("mask");
    Data mask;
    if (hasmask)
    {
        mask=boost::python::extract<escript::Data>(arg["mask"]);
        keys.remove("mask");
        numdata--;
        if (mask.getDataPointRank()!=0)
        {
            throw DataException("saveDataCSVcpp: masks must be scalar.");
        }
    }
    if (numdata<1)
    {
        throw DataException("saveDataCSVcpp: no data to save specified.");
    }
    std::vector<int> step(numdata);
    std::vector<std::string> names(numdata);
    std::vector<Data> data(numdata);
    std::vector<const DataAbstract::ValueType::value_type*> samples(numdata);
    std::vector<int> offset(numdata);
    std::vector<int> fstypes(numdata); // FunctionSpace type for each data

    keys.sort(); // to get some predictable order to things

    // We need to interpret the samples correctly even if they are different types,
    // so we iterate over samples rather than over whole Data objects.
    for (int i=0;i<numdata;++i)
    {
        names[i]=boost::python::extract<std::string>(keys[i]);
        data[i]=boost::python::extract<escript::Data>(arg[keys[i]]);
        step[i]=(data[i].actsExpanded()?DataTypes::noValues(data[i].getDataPointShape()):0);
        fstypes[i]=data[i].getFunctionSpace().getTypeCode();
        if (i>0)
        {
            if (data[i].getDomain()!=data[i-1].getDomain())
            {
                throw DataException("saveDataCSVcpp: all data must be on the same domain.");
            }
        }
    }
    int bestfnspace=0;
    if (!data[0].getDomain()->commonFunctionSpace(fstypes, bestfnspace))
    {
        throw DataException("saveDataCSVcpp: FunctionSpaces of data are incompatible.");
    }
    // now we interpolate all data to the same FunctionSpace
    FunctionSpace best(data[0].getDomain(),bestfnspace);
    for (int i=0;i<numdata;++i)
    {
        data[i]=data[i].interpolate(best);
    }
    int numsamples=data[0].getNumSamples(); // these must be the same for all data
    int dpps=data[0].getNumDataPointsPerSample();

    std::ostringstream os;
    bool first=true;

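    // Rank 0 writes the header row; non-scalar data contribute one column per
    // component, labelled with the data's name and csep-separated indices.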
    if (data[0].getDomain()->getMPIRank()==0)
    {
        for (int i=0;i<numdata;++i)
        {
            const DataTypes::ShapeType& s=data[i].getDataPointShape();
            switch (data[i].getDataPointRank())
            {
            case 0:
                if (!first)
                {
                    os << sep;
                }
                else
                {
                    first=false;
                }
                os << names[i];
                break;
            case 1:
                for (int j=0;j<s[0];++j)
                {
                    if (!first)
                    {
                        os << sep;
                    }
                    else
                    {
                        first=false;
                    }
                    os << names[i] << csep << j;
                }
                break;
            case 2:
                for (int j=0;j<s[0];++j)
                {
                    for (int k=0;k<s[1];++k)
                    {
                        if (!first)
                        {
                            os << sep;
                        }
                        else
                        {
                            first=false;
                        }
                        // indices are written in loop order (j,k) so the labels
                        // match the value order produced by pointToStream below
                        os << names[i] << csep << j << csep << k;
                    }
                }
                break;
            case 3:
                for (int j=0;j<s[0];++j)
                {
                    for (int k=0;k<s[1];++k)
                    {
                        for (int l=0;l<s[2];++l)
                        {
                            if (!first)
                            {
                                os << sep;
                            }
                            else
                            {
                                first=false;
                            }
                            os << names[i] << csep << j << csep << k << csep << l;
                        }
                    }
                }
                break;
            case 4:
                for (int j=0;j<s[0];++j)
                {
                    for (int k=0;k<s[1];++k)
                    {
                        for (int l=0;l<s[2];++l)
                        {
                            for (int m=0;m<s[3];++m)
                            {
                                if (!first)
                                {
                                    os << sep;
                                }
                                else
                                {
                                    first=false;
                                }
                                os << names[i] << csep << j << csep << k << csep << l << csep << m;
                            }
                        }
                    }
                }
                break;
            default:
                throw DataException("saveDataCSV: Illegal rank");
            }
        }
        os << endl;
    }
    boost::scoped_ptr<BufferGroup> maskbuffer; // sample buffer for the mask [if we have one]
    const double* masksample=0;
    int maskoffset=0;
    // shared_ptr is used here just to ensure the buffer groups are freed;
    // scoped_ptr would be preferable but it does not work inside vectors
    std::vector<boost::shared_ptr<BufferGroup> > bg(numdata);
    for (int d=0;d<numdata;++d)
    {
        bg[d].reset(data[d].allocSampleBuffer());
    }

    bool expandedmask=false; // does the mask act expanded, i.e. is there a mask value for each point in the sample?
    bool wantrow=true; // do we output this row?
    if (hasmask)
    {
        maskbuffer.reset(mask.allocSampleBuffer());
        if (mask.actsExpanded())
        {
            maskoffset=DataTypes::noValues(mask.getDataPointShape());
            expandedmask=true;
        }
    }
    os.setf(std::ios_base::scientific, std::ios_base::floatfield);
    os.precision(15);

    // errors prior to this point will occur on all processes anyway,
    // so there is no need to explicitly notify other ranks
    int error=0;
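    // Stream the samples owned by this rank, point by point, skipping any
    // points that the mask switches off.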
    try
    {
        for (int i=0;i<numsamples;++i)
        {
            if (!best.ownSample(i))
            {
                continue;
            }
            wantrow=true;
            for (int d=0;d<numdata;++d)
            {
                samples[d]=data[d].getSampleDataRO(i,bg[d].get());
            }
            if (hasmask)
            {
                masksample=mask.getSampleDataRO(i, maskbuffer.get());
                if (!expandedmask) // mask controls the whole sample
                {
                    if (masksample[0]<=0) // masks are scalar
                    {
                        wantrow=false;
                    }
                }
            }
            for (int j=0;j<dpps;++j)
            {
                // check whether this point is masked off
                if (expandedmask)
                {
                    wantrow=(masksample[j]>0); // masks are scalar, so the relevant value is at [j]
                }
                if (wantrow)
                {
                    bool needsep=false;
                    for (int d=0;d<numdata;++d)
                    {
                        DataTypes::pointToStream(os, samples[d], data[d].getDataPointShape(), offset[d], needsep, sep);
                        needsep=true;
                        offset[d]+=step[d];
                    }
                    os << endl;
                }
            }
            for (int d=0;d<numdata;++d)
            {
                offset[d]=0;
            }
        }
    }
    catch (...)
    {
        error=1;
#ifndef PASO_MPI
        throw;
#endif
    }
#ifdef PASO_MPI
    MPI_Comm com=data[0].getDomain()->getMPIComm();
    int rerror=0;
    MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
    error=rerror;
    if (error)
    {
        throw DataException("saveDataCSVcpp: error building output");
    }
#endif

    // at this point os contains the text to be written
#ifndef PASO_MPI

    std::ofstream ofs;
    if (append)
    {
        ofs.open(filename.c_str(), std::ios_base::app);
    }
    else
    {
        ofs.open(filename.c_str());
    }
    if (!ofs.is_open())
    {
        throw DataException("saveDataCSVcpp: unable to open file for writing");
    }
    ofs << os.str();
    ofs.close();

#else
    // here we have MPI
    MPI_File mpi_fileHandle_p;
    MPI_Status mpi_status;
    MPI_Info mpi_info = MPI_INFO_NULL;
    char* fname_c=new char[filename.size()+1];
    strcpy(fname_c,filename.c_str());
    boost::scoped_array<char> fname_p(fname_c); // scoped_array, not scoped_ptr: allocated with new[]

    int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
    if (append)
    {
        amode |= MPI_MODE_APPEND;
    }
    else
    {
        if (data[0].getDomain()->getMPIRank()==0)
        {
            std::ifstream ifs(fname_p.get()); // if the file exists, remove it
            if (ifs.is_open())
            {
                ifs.close();
                if (remove(fname_p.get()))
                {
                    error=1;
                }
            }
        }
        data[0].getDomain()->MPIBarrier();
        int rerror=0;
        MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
        if (rerror!=0)
        {
            std::ostringstream oss;
            oss << "saveDataCSVcpp: File " << filename << " already exists and could not be removed in preparation for new output.";
            throw DataException(oss.str());
        }
    }
    int ierr;
    ierr = MPI_File_open(com, fname_p.get(), amode, mpi_info, &mpi_fileHandle_p);
    if (ierr != MPI_SUCCESS)
    {
        std::ostringstream oss;
        oss << "saveDataCSVcpp: File " << filename << " could not be opened for writing in parallel";
        // file is not open so we can throw
        throw DataException(oss.str());
    }
    else
    {
        ierr=MPI_File_set_view(mpi_fileHandle_p, MPI_DISPLACEMENT_CURRENT,
                               MPI_CHAR, MPI_CHAR, "native", mpi_info);
        // here we are assuming that std::string holds the same type of char as MPI_CHAR
    }

    std::string contents=os.str();
    char* con=new char[contents.size()+1];
    strcpy(con, contents.c_str());
    boost::scoped_array<char> buff(con); // new[] again, so scoped_array
    ierr=MPI_File_write_ordered(mpi_fileHandle_p, buff.get(), contents.size(), MPI_CHAR, &mpi_status);
    if (ierr != MPI_SUCCESS)
    {
        error=1;
    }

    if (MPI_File_close(&mpi_fileHandle_p)!= MPI_SUCCESS)
    {
        error=1;
    }
    data[0].getDomain()->MPIBarrier();
    // any errors at this stage come from collective routines,
    // so there is no need for a reduce_all
    if (error)
    {
        throw DataException("saveDataCSVcpp: Error writing and closing file");
    }

#endif
}


} // end of namespace
