/[escript]/branches/split/esysUtils/src/Esys_MPI.cpp


Revision 4762 - Tue Mar 18 01:31:20 2014 UTC by jfenwick
File size: 8105 byte(s)
Log message: I do not know why I took this out
/*****************************************************************************
*
* Copyright (c) 2003-2014 by University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "Esys_MPI.h"
#include "index.h"
#include "mem.h"
#include "error.h"

#include <iostream> // temp for debugging
namespace esysUtils
{

// wrap a communicator in a reference-counted JMPI handle
JMPI makeInfo(MPI_Comm comm, bool owncom)
{
    JMPI_* p = new JMPI_(comm, owncom);
    return JMPI(p);
}
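
// A minimal usage sketch (illustrative only, not part of the original source):
//
//     esysUtils::JMPI info = esysUtils::makeInfo(MPI_COMM_WORLD, false);
//     // info->rank and info->size are now valid on every process
//
// With owncom == true the handle takes ownership of the communicator and
// frees it with MPI_Comm_free when the last reference goes away.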

JMPI_::JMPI_(MPI_Comm mpicomm, bool owncom)
    : comm(mpicomm), ownscomm(owncom)
{
    msg_tag_counter = 0;
#ifdef ESYS_MPI
    if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS || MPI_Comm_size(comm, &size) != MPI_SUCCESS)
    {
        Esys_setError(ESYS_MPI_ERROR, "JMPI_::JMPI_ : error finding comm rank/size");
    }
#else
    rank = 0;
    size = 1;
#endif
}

JMPI_::~JMPI_()
{
#ifdef ESYS_MPI
    // free the communicator only if this object owns it
    if (ownscomm)
    {
        MPI_Comm_free(&comm);
    }
#endif
}

// Block-distribute the id range [min_id, max_id] over all ranks:
// distribution[p] receives the first id owned by rank p,
// distribution[size] = max_id + 1, and the return value is the size of the
// largest local chunk.
dim_t JMPI_::setDistribution(index_t min_id, index_t max_id, index_t* distribution)
{
    int rest = 0, p;
    int s = size;
    dim_t N = max_id - min_id + 1;
    if (N > 0) {
        int local_N = N / s;
        rest = N - local_N * s;
        for (p = 0; p < s; ++p) {
            if (p < rest) {
                distribution[p] = min_id + (local_N + 1) * p;
            } else {
                distribution[p] = min_id + rest + local_N * p;
            }
        }
        distribution[s] = max_id + 1;
        if (rest == 0) {
            return local_N;
        } else {
            return local_N + 1;
        }
    } else {
        for (p = 0; p < s + 1; ++p) distribution[p] = min_id;
        return 0;
    }
}
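
// Worked example (illustrative sketch, not part of the original source):
// with size == 3 and ids 0..9 (N == 10), local_N == 3 and rest == 1, so the
// first rank gets one extra id. The resulting array is
//     distribution = {0, 4, 7, 10}
// i.e. rank 0 owns ids 0..3, rank 1 owns 4..6, rank 2 owns 7..9, and the
// function returns 4, the largest chunk size.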

// Divide N items as evenly as possible across all ranks: on return,
// *local_N is this rank's share and *offset is the index of its first item.
void JMPI_::split(dim_t N, dim_t* local_N, index_t* offset)
{
    int rest = 0;
    int s = size;
    int r = rank;
    *local_N = N / s;
    rest = N - (*local_N) * s;
    if (r < rest) {
        (*local_N)++;
        (*offset) = (*local_N) * r;
    } else {
        (*offset) = (*local_N) * r + rest;
    }
}
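
// Worked example (illustrative sketch, not part of the original source):
// with N == 10 and size == 3, ranks 0, 1, 2 receive local_N == 4, 3, 3 and
// offset == 0, 4, 7 respectively -- the same decomposition that
// setDistribution() records in its distribution array.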

} // namespace esysUtils

/* allocate memory for an mpi_comm and find the communicator details */
Esys_MPIInfo* Esys_MPIInfo_alloc(MPI_Comm comm)
{
#ifdef ESYS_MPI
    int ok;
#endif

    Esys_MPIInfo* out = new Esys_MPIInfo;

    out->reference_counter = 0;
    out->msg_tag_counter = 0;
#ifdef ESYS_MPI
    ok = MPI_Comm_rank(comm, &out->rank) == MPI_SUCCESS &&
         MPI_Comm_size(comm, &out->size) == MPI_SUCCESS;
    if (!ok) {
        Esys_setError(ESYS_MPI_ERROR, "Esys_MPIInfo_alloc : error finding comm rank/size");
    }

    out->comm = comm;
#else
    out->rank = 0;
    out->size = 1;
    out->comm = -1;
#endif
    out->reference_counter++;

    return out;
}

/* free memory for an mpi_comm */
void Esys_MPIInfo_free(Esys_MPIInfo* in)
{
    if (in != NULL) {
        if (!(--in->reference_counter)) delete in;
    }
}

Esys_MPIInfo* Esys_MPIInfo_getReference(Esys_MPIInfo* in)
{
    if (in != NULL)
        ++(in->reference_counter);

    return in;
}
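
// Lifecycle sketch (illustrative only, not part of the original source):
//
//     Esys_MPIInfo* info = Esys_MPIInfo_alloc(MPI_COMM_WORLD); // counter == 1
//     Esys_MPIInfo* ref  = Esys_MPIInfo_getReference(info);    // counter == 2
//     Esys_MPIInfo_free(ref);   // counter == 1, object kept
//     Esys_MPIInfo_free(info);  // counter == 0, object deleted
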
/* n = #CPUs, k is a CPU number that may be out of range or even negative.
   Returns a CPU number in 0..n-1. */
index_t Esys_MPIInfo_mod(index_t n, index_t k)
{
    index_t q, out = 0;
    if (n > 1) {
        q = k / n;
        if (k > 0) {
            out = k - n * q;
        } else if (k < 0) {
            out = k - n * (q - 1);
            if (out == n) out = 0; // k was an exact negative multiple of n
        }
    }
    return out;
}
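
// Worked example (illustrative sketch, not part of the original source):
// Esys_MPIInfo_mod(4, 6) == 2 and Esys_MPIInfo_mod(4, -3) == 1, so rank
// numbers computed by offset arithmetic wrap around the communicator size.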

void Esys_MPIInfo_Split(Esys_MPIInfo* mpi_info, dim_t N, dim_t* local_N, index_t* offset)
{
    int rest = 0;
    int s = mpi_info->size;
    int r = mpi_info->rank;
    *local_N = N / s;
    rest = N - (*local_N) * s;
    if (r < rest) {
        (*local_N)++;
        (*offset) = (*local_N) * r;
    } else {
        (*offset) = (*local_N) * r + rest;
    }
}

dim_t Esys_MPIInfo_setDistribution(esysUtils::JMPI& mpi_info, index_t min_id, index_t max_id, index_t* distribution)
{
    int rest = 0, p;
    int s = mpi_info->size;
    dim_t N = max_id - min_id + 1;
    if (N > 0) {
        int local_N = N / s;
        rest = N - local_N * s;
        for (p = 0; p < s; ++p) {
            if (p < rest) {
                distribution[p] = min_id + (local_N + 1) * p;
            } else {
                distribution[p] = min_id + rest + local_N * p;
            }
        }
        distribution[s] = max_id + 1;
        if (rest == 0) {
            return local_N;
        } else {
            return local_N + 1;
        }
    } else {
        for (p = 0; p < s + 1; ++p) distribution[p] = min_id;
        return 0;
    }
}

/* checks that there is no error across all processes in a communicator */
/* NOTE: this does not guarantee that the error string is consistent across processes */
bool esysUtils::Esys_MPIInfo_noError(const esysUtils::JMPI& mpi_info)
{
    int errorLocal = Esys_noError() ? 0 : 1;
    int errorGlobal = errorLocal;

#ifdef ESYS_MPI
    if (!checkResult(errorLocal, errorGlobal, mpi_info->comm))
    {
        return false;
    }
    if ((errorLocal == 0) && (errorGlobal == 1))
    {
        Esys_setError(ESYS_MPI_ERROR, "Esys_MPIInfo_noError() : there was an error on another MPI process");
    }
#endif

    return (errorGlobal == 0);
}
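
// Usage sketch (illustrative only, not part of the original source): call
// collectively after a step that may fail on any rank, so that every rank
// takes the same branch:
//
//     if (!esysUtils::Esys_MPIInfo_noError(mpi_info)) {
//         // all ranks agree that something went wrong somewhere
//     }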

/* returns the max of the inputs over all ranks -- or, without MPI, just copies input to output */
bool esysUtils::checkResult(int& input, int& output, MPI_Comm& comm)
{
#ifdef ESYS_MPI
    output = 0;
    if (MPI_Allreduce(&input, &output, 1, MPI_INT, MPI_MAX, comm) != MPI_SUCCESS)
    {
        return false;
    }
    return true;
#else
    output = input;
    return true;
#endif
}
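
// Usage sketch (illustrative only, not part of the original source; the
// failure flag is hypothetical). Because the reduction takes the maximum,
// a nonzero flag on any rank is seen by all:
//
//     int local = some_local_failure ? 1 : 0;
//     int global = 0;
//     if (esysUtils::checkResult(local, global, comm) && global != 0) {
//         // at least one rank reported a failure
//     }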

// Ensure that any ranks with an empty src argument end up with the string
// from one of the other ranks.
// Without MPI, this makes dest point at a copy of src.
// The expected use case for this code is to ship error messages between
// ranks; as such, it is not written to be speedy.
bool esysUtils::shipString(const char* src, char** dest, MPI_Comm& comm)
{
#ifdef ESYS_MPI
    Esys_MPI_rank rank = 0;
    if (MPI_Comm_rank(comm, &rank) != MPI_SUCCESS)
    {
        return false; // we have no reason to believe MPI works anymore
    }

    int slen = strlen(src);
    // Everybody needs to tell everyone whether they have a string:
    // send your rank if you have a non-empty string, else send -1.
    int in = (slen ? rank : -1);
    int out;
    if (MPI_Allreduce(&in, &out, 1, MPI_INT, MPI_MAX, comm) != MPI_SUCCESS)
    {
        return false;
    }
    if (out == -1) // should not be called under these conditions, but no one had a string
    {
        *dest = new char[1];
        (*dest)[0] = '\0';
        return true;
    }
    // Since we will be using broadcast, we need to tell everyone how big the
    // string is going to be, with an additional bcast.
    if (MPI_Bcast(&slen, 1, MPI_INT, out, comm) != MPI_SUCCESS)
    {
        return false;
    }
    // Now broadcast that string to everyone.
    if (rank == out)
    {
        // We could const_cast src, but make a copy instead.
        *dest = new char[slen + 1];
        strcpy(*dest, src);

        // The owning rank just sends the string.
        if (MPI_Bcast(*dest, slen + 1, MPI_CHAR, out, comm) != MPI_SUCCESS)
        {
            return false;
        }
        return true;
    }
    else
    {
        *dest = new char[slen + 1];
        if (MPI_Bcast(*dest, slen + 1, MPI_CHAR, out, comm) != MPI_SUCCESS)
        {
            return false;
        }
        return true;
    }
#else
    *dest = new char[strlen(src) + 1];
    strcpy(*dest, src);
    return true;
#endif
}
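
// Usage sketch (illustrative only, not part of the original source;
// haveError and errText are hypothetical names for a rank-local error state):
//
//     char* msg = NULL;
//     // ranks with no message pass "" as src
//     if (esysUtils::shipString(haveError ? errText : "", &msg, comm)) {
//         // every rank now holds the same message in msg
//     }
//     delete[] msg;   // shipString allocates with new char[]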

namespace
{
    bool splitworld = false;
}

namespace esysUtils
{

/* have the sub-communicators been created? */
bool getSplitWorld()
{
    return splitworld;
}

/* record that a sub-communicator has been created or used */
void splitWorld()
{
    splitworld = true;
}

} // namespace esysUtils

/**************************************************
 WRAPPERS
**************************************************/

int Esys_MPIInfo_initialized(void)
{
#ifdef ESYS_MPI
    int error = 0, initialised = 0;
    error = MPI_Initialized(&initialised);
    if (error != MPI_SUCCESS)
        Esys_setError(ESYS_MPI_ERROR, "mpi_initialised : MPI error");
    return initialised;
#else
    return TRUE;
#endif
}

#ifndef _OPENMP
int serial_get_max_threads(void) {
    return 1;
}
int serial_get_thread_num(void) {
    return 0;
}
#endif
