/trunk/escript/src/Utils.cpp
Revision 2641
Mon Aug 31 07:41:49 2009 UTC by jfenwick
File size: 11801 bytes
Fixed some of my stupids related to MPI compile errors.


/*******************************************************
*
* Copyright (c) 2003-2009 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/


#include <fstream>
#include <string.h>
#include <stdio.h>    // printf; fopen/fgets in get_core_id
#include <stdlib.h>   // atoi in get_core_id
#include <cfloat>     // DBL_EPSILON, DBL_MAX

// added for saveCSV
#include <boost/python.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/scoped_array.hpp>
#include "Data.h"

#include "Utils.h"
#include "DataVector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef PASO_MPI
#include <mpi.h>
#endif

#ifdef _WIN32
#include <WinSock2.h>
#else
#include <unistd.h>
#endif

namespace escript {

int getSvnVersion()
{
#ifdef SVN_VERSION
  return SVN_VERSION;
#else
  return 0;
#endif
}
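
// getSvnVersion above relies on SVN_VERSION being supplied as a compile-time
// define by the build system; a return value of 0 means "unknown revision".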

/* This is probably not very robust, but it works on Savanna today and is useful for performance analysis */
int get_core_id() {
  int processor_num=-1;
#ifdef CORE_ID1
  FILE *fp;
  int i, count_spaces=0;
  char fname[100];
  char buf[1000];

  sprintf(fname, "/proc/%d/stat", getpid());
  fp = fopen(fname, "r");
  if (fp == NULL) return(-1);
  if (fgets(buf, 1000, fp) == NULL) {
    fclose(fp);
    return(-1);
  }
  fclose(fp);

  /* The core number is assumed to be the fourth-from-last space-separated
     field of /proc/<pid>/stat, so scan backwards for the fourth space. */
  for (i=strlen(buf)-1; i>=0; i--) {
    if (buf[i] == ' ') count_spaces++;
    if (count_spaces == 4) break;
  }
  processor_num = atoi(&buf[i+1]);
#endif
  return(processor_num);
}


void printParallelThreadCnt()
{
  int mpi_iam=0, mpi_num=1;
  char hname[64];

#ifdef HAVE_GETHOSTNAME
  gethostname(hname, 64);
  hname[63] = '\0';
#else
  strcpy(hname, "unknown host");
#endif

#ifdef PASO_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif

#pragma omp parallel
  {
    int omp_iam=0, omp_num=1;
#ifdef _OPENMP
    omp_iam = omp_get_thread_num(); /* Call within a parallel region */
    omp_num = omp_get_num_threads();
#endif
#pragma omp critical (printthrdcount)
    printf("printParallelThreadCounts: MPI=%03d/%03d OpenMP=%03d/%03d running on %s core %d\n",
           mpi_iam, mpi_num, omp_iam, omp_num, hname, get_core_id());
  }
}
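
// Example output of printParallelThreadCnt, one line per OpenMP thread on each
// rank (hostname and counts are illustrative):
//   printParallelThreadCounts: MPI=000/002 OpenMP=001/004 running on node01 core 3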

void setNumberOfThreads(const int num_threads)
{
#ifdef _OPENMP
  omp_set_num_threads(num_threads);
#endif
}

int getNumberOfThreads()
{
#ifdef _OPENMP
  return omp_get_max_threads();
#else
  return 1;
#endif
}

ESCRIPT_DLL_API int getMPISizeWorld() {
  int mpi_num = 1;
#ifdef PASO_MPI
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif
  return mpi_num;
}

ESCRIPT_DLL_API int getMPIRankWorld() {
  int mpi_iam = 0;
#ifdef PASO_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
#endif
  return mpi_iam;
}

ESCRIPT_DLL_API int getMPIWorldMax(const int val) {
#ifdef PASO_MPI
  int val2 = val;
  int out = val;
  MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
#else
  int out = val;
#endif
  return out;
}

ESCRIPT_DLL_API int getMPIWorldSum(const int val) {
#ifdef PASO_MPI
  int val2 = val;
  int out = 0;
  MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
#else
  int out = val;
#endif
  return out;
}
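
// Usage sketch for the reductions above (illustrative; compute_local_flag is
// a hypothetical helper). The wrappers are safe to call whether or not escript
// was built with PASO_MPI, so callers can take a global "vote" without any
// MPI guards of their own:
//
//   int local_flag = compute_local_flag();
//   if (getMPIWorldMax(local_flag) != 0) {
//     // at least one rank set the flag
//   }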

ESCRIPT_DLL_API double getMachinePrecision() {
  return DBL_EPSILON;
}
ESCRIPT_DLL_API double getMaxFloat() {
  return DBL_MAX;
}
ESCRIPT_DLL_API void MPIBarrierWorld() {
#ifdef PASO_MPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
}
174 gross 2100
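// saveDataCSV writes the Data objects in the Python dict 'arg' (name -> Data)
// to 'filename' in CSV form. 'sep' separates values within a row; 'csep' joins
// a name with its component indices in the header row. The optional dict entry
// "mask" (a scalar Data) selects which rows are written. This is normally
// reached from Python via esys.escript.saveDataCSV -- a minimal sketch,
// assuming the usual binding:
//
//   from esys.escript import saveDataCSV
//   saveDataCSV("out.csv", u=u, p=p, mask=m)   # u, p, m are Data objects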
ESCRIPT_DLL_API
void
saveDataCSV(const std::string& filename, boost::python::dict arg, const std::string& sep, const std::string& csep,
            bool append)
{
  using std::endl;
  boost::python::list keys=arg.keys();
  int numdata = boost::python::extract<int>(arg.attr("__len__")());
  bool hasmask=arg.has_key("mask");
  Data mask;
  if (hasmask)
  {
    mask=boost::python::extract<escript::Data>(arg["mask"]);
    keys.remove("mask");
    numdata--;
    if (mask.getDataPointRank()!=0)
    {
      throw DataException("saveDataCSVcpp: masks must be scalar.");
    }
  }
  if (numdata<1)
  {
    throw DataException("saveDataCSVcpp: no data to save specified.");
  }
  std::vector<int> step(numdata);
  std::vector<std::string> names(numdata);
  std::vector<Data> data(numdata);
  std::vector<const DataAbstract::ValueType::value_type*> samples(numdata);
  std::vector<int> offset(numdata);
  std::vector<int> fstypes(numdata);    // FunctionSpace types for each data

  // We need to interpret the samples correctly even if they are different types,
  // so we iterate over samples rather than over whole Data objects.
  for (int i=0;i<numdata;++i)
  {
    names[i]=boost::python::extract<std::string>(keys[i]);
    data[i]=boost::python::extract<escript::Data>(arg[keys[i]]);
    step[i]=(data[i].actsExpanded()?DataTypes::noValues(data[i].getDataPointShape()):0);
    fstypes[i]=data[i].getFunctionSpace().getTypeCode();
    if (i>0)
    {
      if (data[i].getDomain()!=data[i-1].getDomain())
      {
        throw DataException("saveDataCSVcpp: all data must be on the same domain.");
      }
    }
  }
  int bestfnspace=0;
  if (!data[0].getDomain()->commonFunctionSpace(fstypes, bestfnspace))
  {
    throw DataException("saveDataCSVcpp: FunctionSpaces of data are incompatible.");
  }
  // now we interpolate all data to the same FunctionSpace
  FunctionSpace best(data[0].getDomain(),bestfnspace);
  for (int i=0;i<numdata;++i)
  {
    data[i]=data[i].interpolate(best);
  }
  int numsamples=data[0].getNumSamples();    // these must be the same for all data
  int dpps=data[0].getNumDataPointsPerSample();
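
  // Header example (illustrative): with sep="," and csep="_", a scalar named
  // "p" and a rank-1 Data named "v" with shape [2] give the header line
  //     p,v_0,v_1
  // Only MPI rank 0 generates the header.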

  std::ostringstream os;

  bool first=true;

  if (data[0].getDomain()->getMPIRank()==0)
  {
    for (int i=0;i<numdata;++i)
    {
      const DataTypes::ShapeType& s=data[i].getDataPointShape();
      switch (data[i].getDataPointRank())
      {
        case 0:
          if (!first)
          {
            os << sep;
          }
          else
          {
            first=false;
          }
          os << names[i];
          break;
        case 1:
          for (int j=0;j<s[0];++j)
          {
            if (!first)
            {
              os << sep;
            }
            else
            {
              first=false;
            }
            os << names[i] << csep << j;
          }
          break;
        case 2:
          for (int j=0;j<s[0];++j)
          {
            for (int k=0;k<s[1];++k)
            {
              if (!first)
              {
                os << sep;
              }
              else
              {
                first=false;
              }
              os << names[i] << csep << j << csep << k;
            }
          }
          break;
        case 3:
          for (int j=0;j<s[0];++j)
          {
            for (int k=0;k<s[1];++k)
            {
              for (int l=0;l<s[2];++l)
              {
                if (!first)
                {
                  os << sep;
                }
                else
                {
                  first=false;
                }
                os << names[i] << csep << j << csep << k << csep << l;
              }
            }
          }
          break;
        case 4:
          for (int j=0;j<s[0];++j)
          {
            for (int k=0;k<s[1];++k)
            {
              for (int l=0;l<s[2];++l)
              {
                for (int m=0;m<s[3];++m)
                {
                  if (!first)
                  {
                    os << sep;
                  }
                  else
                  {
                    first=false;
                  }
                  os << names[i] << csep << j << csep << k << csep << l << csep << m;
                }
              }
            }
          }
          break;
        default:
          throw DataException("saveDataCSV: illegal rank");
      }
    }
    os << endl;
  }
  boost::scoped_ptr<BufferGroup> maskbuffer;    // sample buffer for the mask [if we have one]
  const double* masksample=0;
  int maskoffset=0;
  // the use of shared_ptr here is just to ensure the buffer groups are freed;
  // scoped_ptr would have been preferable but it does not work in vectors
  std::vector<boost::shared_ptr<BufferGroup> > bg(numdata);
  for (int d=0;d<numdata;++d)
  {
    bg[d].reset(data[d].allocSampleBuffer());
  }

  bool expandedmask=false;    // does the mask act expanded, i.e. is there a mask value for each point in the sample?
  bool wantrow=true;          // do we output this row?
  if (hasmask)
  {
    maskbuffer.reset(mask.allocSampleBuffer());
    if (mask.actsExpanded())
    {
      maskoffset=DataTypes::noValues(mask.getDataPointShape());
      expandedmask=true;
    }
  }
  os.setf(std::ios_base::scientific, std::ios_base::floatfield);
  os.precision(15);

  // errors prior to this point will occur on all processes anyway,
  // so there is no need to explicitly notify other ranks
  int error=0;
  try{
    for (int i=0;i<numsamples;++i)
    {
      wantrow=true;
      for (int d=0;d<numdata;++d)
      {
        samples[d]=data[d].getSampleDataRO(i,bg[d].get());
      }
      if (hasmask)
      {
        masksample=mask.getSampleDataRO(i, maskbuffer.get());
        if (!expandedmask)    // mask controls the whole sample
        {
          if (masksample[0]<=0)    // masks are scalar
          {
            wantrow=false;
          }
        }
      }
      for (int j=0;j<dpps;++j)
      {
        // now we need to check whether this point is masked off
        if (expandedmask)
        {
          wantrow=(masksample[j]>0);    // masks are scalar, so the relevant value is at [j]
        }
        if (wantrow)
        {
          bool needsep=false;
          for (int d=0;d<numdata;++d)
          {
            DataTypes::pointToStream(os, samples[d], data[d].getDataPointShape(), offset[d], needsep, sep);
            needsep=true;
            offset[d]+=step[d];
          }
          os << endl;
        }
      }
      for (int d=0;d<numdata;++d)
      {
        offset[d]=0;
      }
    }
  } catch (...)
  {
    error=1;
#ifndef PASO_MPI
    throw;
#endif
  }
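
  // Under MPI every rank must learn about a failure on any rank before anyone
  // throws; otherwise the ranks that continue would block in the collective
  // file operations below. Hence the Allreduce on the error flag.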
#ifdef PASO_MPI
  MPI_Comm com=data[0].getDomain()->getMPIComm();
  int rerror=0;
  MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
  error=rerror;
  if (error)
  {
    throw DataException("saveDataCSVcpp: error building output");
  }
#endif

  // at this point os contains the text to be written
#ifndef PASO_MPI

  std::ofstream ofs;
  if (append)
  {
    ofs.open(filename.c_str(), std::ios_base::app);
  }
  else
  {
    ofs.open(filename.c_str());
  }
  if (!ofs.is_open())
  {
    throw DataException("saveDataCSVcpp: unable to open file for writing");
  }
  ofs << os.str();
  ofs.close();

#else
  // here we have MPI
  MPI_File mpi_fileHandle_p;
  MPI_Status mpi_status;
  MPI_Info mpi_info = MPI_INFO_NULL;
  // scoped_array (not scoped_ptr) so the new[]'d buffer is released with delete[]
  boost::scoped_array<char> fname_p(new char[filename.size()+1]);
  strcpy(fname_p.get(), filename.c_str());

  int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
  if (append)
  {
    amode |= MPI_MODE_APPEND;
  }
  else
  {
    if (data[0].getDomain()->getMPIRank()==0)
    {
      std::ifstream ifs(fname_p.get());    // if the file exists, remove it
      if (ifs.is_open())
      {
        ifs.close();
        if (remove(fname_p.get())!=0)    // remove() returns 0 on success
        {
          error=1;
        }
      }
    }
    data[0].getDomain()->MPIBarrier();
    int rerror=0;
    MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
    if (rerror!=0)
    {
      std::ostringstream oss;
      oss << "saveDataCSVcpp: File " << filename << " already exists and could not be removed in preparation for new output.";
      throw DataException(oss.str());
    }
  }
  int ierr;
  ierr = MPI_File_open(com, fname_p.get(), amode, mpi_info, &mpi_fileHandle_p);
  if (ierr != MPI_SUCCESS)
  {
    std::ostringstream oss;
    oss << "saveDataCSVcpp: File " << filename << " could not be opened for writing in parallel.";
    // file is not open so we can throw
    throw DataException(oss.str());
  }
  else
  {
    ierr=MPI_File_set_view(mpi_fileHandle_p, MPI_DISPLACEMENT_CURRENT,
        MPI_CHAR, MPI_CHAR, "native", mpi_info);
    // here we are assuming that std::string holds the same type of char as MPI_CHAR
  }
  std::string contents=os.str();
  boost::scoped_array<char> buff(new char[contents.size()+1]);
  strcpy(buff.get(), contents.c_str());
  ierr=MPI_File_write_ordered(mpi_fileHandle_p, buff.get(), contents.size(), MPI_CHAR, &mpi_status);
  if (ierr != MPI_SUCCESS)
  {
    error=1;
  }

  if (MPI_File_close(&mpi_fileHandle_p)!= MPI_SUCCESS)
  {
    error=1;
  }
  data[0].getDomain()->MPIBarrier();
  if (error)    // any errors at this stage are from collective routines
  {             // so there is no need to reduce_all
    throw DataException("saveDataCSVcpp: error writing and closing file");
  }

#endif
}

} // end of namespace
