Contents of /trunk/paso/src/Coupler.c

Revision 3642 - Thu Oct 27 03:41:51 2011 UTC by caltinay
Assorted spelling/comment fixes in paso.


/*******************************************************
*
* Copyright (c) 2003-2010 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/


#include "Coupler.h"
#include "esysUtils/error.h"

/*************************************************************
*
* allocates a Connector
*
**************************************************************/

Paso_Connector* Paso_Connector_alloc(Paso_SharedComponents* send,
                                     Paso_SharedComponents* recv)
{
  Paso_Connector* out=NULL;
  Esys_resetError();
  if ( send->mpi_info != recv->mpi_info ) {
     Esys_setError(SYSTEM_ERROR,"Paso_Connector_alloc: send and recv mpi communicators don't match.");
     return NULL;
  }
  if ( send->local_length != recv->local_length ) {
     Esys_setError(SYSTEM_ERROR,"Paso_Connector_alloc: local length of send and recv Paso_SharedComponents must match.");
     return NULL;
  }

  out=MEMALLOC(1,Paso_Connector);
  if (!Esys_checkPtr(out)) {
      out->send=Paso_SharedComponents_getReference(send);
      out->recv=Paso_SharedComponents_getReference(recv);
      out->mpi_info = Esys_MPIInfo_getReference(send->mpi_info);
      out->reference_counter=1;

      /*
      { int i;
        for (i=0; i< out->recv->numNeighbors; ++i)
            printf("Coupler: %d receive %d data at %d from %d\n",send->mpi_info->rank,out->recv->offsetInShared[i+1]- out->recv->offsetInShared[i],
                   out->recv->offsetInShared[i],out->recv->neighbor[i]);
        for (i=0; i< out->send->numNeighbors; ++i)
            printf("Coupler: %d send %d data at %d to %d\n",send->mpi_info->rank,out->send->offsetInShared[i+1]- out->send->offsetInShared[i],
                   out->send->offsetInShared[i],out->send->neighbor[i]);
      }
      */

  }
  if (Esys_noError()) {
     return out;
  } else {
     Paso_Connector_free(out);
     return NULL;
  }
}
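
/*
 * Illustrative usage sketch (not part of the library): building a
 * Connector from a send and a receive pattern. The names send_pattern
 * and recv_pattern are hypothetical Paso_SharedComponents; both must
 * live on the same MPI communicator and have the same local length,
 * otherwise the allocation above fails with SYSTEM_ERROR.
 *
 *  Paso_Connector* conn=Paso_Connector_alloc(send_pattern, recv_pattern);
 *  if (conn!=NULL) {
 *      ... use conn, e.g. to allocate a Paso_Coupler below ...
 *      Paso_Connector_free(conn);
 *  }
 */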

/* returns a reference to Connector */

Paso_Connector* Paso_Connector_getReference(Paso_Connector* in) {
     if (in!=NULL) {
        ++(in->reference_counter);
     }
     return in;
}

/* deallocates a Connector: */

void Paso_Connector_free(Paso_Connector* in) {
  if (in!=NULL) {
     in->reference_counter--;
     if (in->reference_counter<=0) {
        Paso_SharedComponents_free(in->send);
        Paso_SharedComponents_free(in->recv);
        Esys_MPIInfo_free(in->mpi_info);
        MEMFREE(in);
        #ifdef Paso_TRACE
        printf("Paso_Connector_free: connector has been deallocated.\n");
        #endif
     }
  }
}

Paso_Connector* Paso_Connector_copy(Paso_Connector* in) {
    return Paso_Connector_unroll(in,1);
}

Paso_Connector* Paso_Connector_unroll(Paso_Connector* in, index_t block_size) {
     Paso_SharedComponents *new_send_shcomp=NULL, *new_recv_shcomp=NULL;
     Paso_Connector *out=NULL;
     if (Esys_noError()) {
        if (block_size>1) {
            new_send_shcomp=Paso_SharedComponents_alloc(in->send->local_length,
                                                        in->send->numNeighbors,
                                                        in->send->neighbor,
                                                        in->send->shared,
                                                        in->send->offsetInShared,
                                                        block_size,0,in->mpi_info);

            new_recv_shcomp=Paso_SharedComponents_alloc(in->recv->local_length,
                                                        in->recv->numNeighbors,
                                                        in->recv->neighbor,
                                                        in->recv->shared,
                                                        in->recv->offsetInShared,
                                                        block_size,0,in->mpi_info);
        } else {
            new_send_shcomp=Paso_SharedComponents_getReference(in->send);
            new_recv_shcomp=Paso_SharedComponents_getReference(in->recv);
        }
        if (Esys_noError()) out=Paso_Connector_alloc(new_send_shcomp,new_recv_shcomp);
     }
     Paso_SharedComponents_free(new_send_shcomp);
     Paso_SharedComponents_free(new_recv_shcomp);
     if (Esys_noError()) {
          return out;
     } else {
          Paso_Connector_free(out);
          return NULL;
     }
}
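
/*
 * Illustrative sketch (not part of the library): unrolling replicates a
 * communication pattern so that each shared index carries block_size
 * consecutive values, e.g. for vector-valued data. scalar_conn is a
 * hypothetical Connector built for a single value per component.
 *
 *  Paso_Connector* vec_conn=Paso_Connector_unroll(scalar_conn, 3);
 *  ... vec_conn now exchanges 3 doubles per shared component ...
 *  Paso_Connector_free(vec_conn);
 */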
/*************************************************************
*
* allocates a Coupler
*
**************************************************************/

Paso_Coupler* Paso_Coupler_alloc(Paso_Connector* connector, dim_t block_size)
{
  Esys_MPIInfo *mpi_info = connector->mpi_info;
  Paso_Coupler* out=NULL;
  Esys_resetError();
  out=MEMALLOC(1,Paso_Coupler);
  if (!Esys_checkPtr(out)) {
      out->data=NULL;
      out->block_size=block_size;
      out->connector=Paso_Connector_getReference(connector);
      out->send_buffer=NULL;
      out->recv_buffer=NULL;
      out->mpi_requests=NULL;
      out->mpi_stati=NULL;
      out->mpi_info = Esys_MPIInfo_getReference(mpi_info);
      out->reference_counter=1;
      out->in_use = FALSE;

      #ifdef ESYS_MPI
         out->mpi_requests=MEMALLOC(connector->send->numNeighbors+connector->recv->numNeighbors,MPI_Request);
         out->mpi_stati=MEMALLOC(connector->send->numNeighbors+connector->recv->numNeighbors,MPI_Status);
         Esys_checkPtr(out->mpi_requests);
         Esys_checkPtr(out->mpi_stati);
      #endif
      if (mpi_info->size>1) {
         out->send_buffer=MEMALLOC(connector->send->numSharedComponents * block_size,double);
         out->recv_buffer=MEMALLOC(connector->recv->numSharedComponents * block_size,double);
         Esys_checkPtr(out->send_buffer);
         Esys_checkPtr(out->recv_buffer);
      }
  }
  if (Esys_noError()) {
     return out;
  } else {
     Paso_Coupler_free(out);
     return NULL;
  }
}

/* returns a reference to Coupler */

Paso_Coupler* Paso_Coupler_getReference(Paso_Coupler* in) {
     if (in!=NULL) {
        ++(in->reference_counter);
     }
     return in;
}

/* deallocates a Coupler: */

void Paso_Coupler_free(Paso_Coupler* in) {
  if (in!=NULL) {
     in->reference_counter--;
     if (in->reference_counter<=0) {
        Paso_Connector_free(in->connector);
        MEMFREE(in->send_buffer);
        MEMFREE(in->recv_buffer);
        MEMFREE(in->mpi_requests);
        MEMFREE(in->mpi_stati);
        Esys_MPIInfo_free(in->mpi_info);
        MEMFREE(in);
     }
  }
}


void Paso_Coupler_startCollect(Paso_Coupler* coupler, const double* in)
{
  Esys_MPIInfo *mpi_info = coupler->mpi_info;
  dim_t block_size=coupler->block_size;
  size_t block_size_size=block_size*sizeof(double);
  dim_t i;
  coupler->data=(double*) in;
  if ( mpi_info->size>1) {
     if (coupler->in_use) {
        Esys_setError(SYSTEM_ERROR,"Paso_Coupler_startCollect: Coupler in use.");
        return;
     }
     /* start receiving input */
     {
        for (i=0; i< coupler->connector->recv->numNeighbors; ++i) {
            #ifdef ESYS_MPI
            MPI_Irecv(&(coupler->recv_buffer[coupler->connector->recv->offsetInShared[i] * block_size]),
                      (coupler->connector->recv->offsetInShared[i+1]- coupler->connector->recv->offsetInShared[i])*block_size,
                      MPI_DOUBLE,
                      coupler->connector->recv->neighbor[i],
                      mpi_info->msg_tag_counter+coupler->connector->recv->neighbor[i],
                      mpi_info->comm,
                      &(coupler->mpi_requests[i]));
            #endif
        }
     }
     /* collect values into buffer */
     if (block_size>1) {
        #pragma omp parallel for private(i)
        for (i=0; i < coupler->connector->send->numSharedComponents; ++i) {
           memcpy(&(coupler->send_buffer[(block_size)*i]), &(in[block_size * coupler->connector->send->shared[i]]), block_size_size);
        }
     } else {
        #pragma omp parallel for private(i)
        for (i=0; i < coupler->connector->send->numSharedComponents; ++i) {
           coupler->send_buffer[i]=in[coupler->connector->send->shared[i]];
        }
     }
     /* send buffer out */
     {
        for (i=0; i< coupler->connector->send->numNeighbors; ++i) {
            #ifdef ESYS_MPI
            MPI_Issend(&(coupler->send_buffer[coupler->connector->send->offsetInShared[i] * block_size]),
                       (coupler->connector->send->offsetInShared[i+1]- coupler->connector->send->offsetInShared[i])*block_size,
                       MPI_DOUBLE,
                       coupler->connector->send->neighbor[i],
                       mpi_info->msg_tag_counter+mpi_info->rank,
                       mpi_info->comm,
                       &(coupler->mpi_requests[i+ coupler->connector->recv->numNeighbors]));
            #endif
        }
     }
     mpi_info->msg_tag_counter+=mpi_info->size;
     coupler->in_use=TRUE;
  }
}

double* Paso_Coupler_finishCollect(Paso_Coupler* coupler)
{
  Esys_MPIInfo *mpi_info = coupler->mpi_info;
  if ( mpi_info->size>1) {
     if (! coupler->in_use) {
        Esys_setError(SYSTEM_ERROR,"Paso_Coupler_finishCollect: Communication has not been initiated.");
        return NULL;
     }
     /* wait for receive */
     #ifdef ESYS_MPI
     MPI_Waitall(coupler->connector->recv->numNeighbors+coupler->connector->send->numNeighbors,
                 coupler->mpi_requests,
                 coupler->mpi_stati);
     #endif
     coupler->in_use=FALSE;
  }

  return coupler->recv_buffer;
}
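
/*
 * Illustrative usage sketch (not part of the library): a complete halo
 * exchange. The startCollect/finishCollect split lets local work overlap
 * the communication. conn and x are hypothetical; x holds block_size
 * doubles per local component.
 *
 *  Paso_Coupler* coupler=Paso_Coupler_alloc(conn, 1);
 *  Paso_Coupler_startCollect(coupler, x);
 *  ... do work that does not touch the shared entries of x ...
 *  {
 *     double* remote=Paso_Coupler_finishCollect(coupler);
 *     ... remote[i] is the value received for the i-th overlap component ...
 *  }
 *  Paso_Coupler_free(coupler);
 */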

void Paso_Coupler_copyAll(const Paso_Coupler* src, Paso_Coupler* target)
{
  dim_t i;
  #pragma omp parallel
  {
      #pragma omp for private(i)
      for (i=0; i< src->connector->recv->numSharedComponents * src->block_size; ++i) {
          target->recv_buffer[i]=src->recv_buffer[i];
      }
      #pragma omp for private(i)
      for (i=0; i< Paso_Coupler_getLocalLength(src) * src->block_size; ++i) {
          target->data[i]=src->data[i];
      }
  }
}

/* copies the overlap values collected from other ranks into the tail of x */
void Paso_Coupler_fillOverlap(const dim_t n, double* x, Paso_Coupler *coupler)
{
  double *remote_values = NULL;
  const dim_t overlap_n = Paso_Coupler_getNumOverlapValues(coupler);
  const dim_t my_n= n - overlap_n;
  const dim_t block_size = coupler->block_size;
  const dim_t offset = block_size * my_n;
  dim_t i;

  Paso_Coupler_startCollect(coupler, x);
  Paso_Coupler_finishCollect(coupler);
  remote_values=coupler->recv_buffer;

  #pragma omp parallel for private(i)
  for (i=0; i<overlap_n * block_size; ++i) {
      x[offset+i]=remote_values[i];
  }
}
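
/*
 * Illustrative note (not part of the library): fillOverlap assumes x is
 * laid out with the owned values first, followed by the overlap:
 *
 *    x = [ my_n*block_size owned values | overlap values ]
 *
 * so that, with a hypothetical total length n_total,
 *
 *  Paso_Coupler_fillOverlap(n_total, x, coupler);
 *
 * overwrites only the tail of x with the values received from the
 * owning ranks.
 */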

/* replaces each overlap value of x by the maximum of the local and the received value */
void Paso_Coupler_max(const dim_t n, double* x, Paso_Coupler *coupler)
{
  double *remote_values = NULL;
  const dim_t overlap_n = Paso_Coupler_getNumOverlapValues(coupler);
  const dim_t my_n= n - overlap_n;
  dim_t i;

  Paso_Coupler_startCollect(coupler, x);
  Paso_Coupler_finishCollect(coupler);
  remote_values=coupler->recv_buffer;
  #pragma omp parallel for private(i)
  for (i=0; i<overlap_n; ++i) x[my_n+i]=MAX(x[my_n+i], remote_values[i]);
}