Diff of /trunk/paso/src/SystemMatrix_copyRemoteCoupleBlock.c


Diff between revision 3827 by lgao, Tue Feb 14 11:42:08 2012 UTC and revision 3828 by lgao, Wed Feb 15 03:27:58 2012 UTC. Lines added in revision 3828 are marked with "+".

# Line 95 (v.3827) / Line 95 (v.3828)   void Paso_SystemMatrix_copyRemoteCoupleB
   global_id = A->global_id;

   /* distribute the number of cols in current col_coupleBlock to all ranks */
+  #ifdef ESYS_MPI
   MPI_Allgatherv(&num_couple_cols, 1, MPI_INT, recv_buf, recv_degree, recv_offset, MPI_INT, A->mpi_info->comm);
+  #endif

   /* distribute global_ids of cols to be considered to all ranks*/
   len = 0;
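
The guard added in this hunk is the pattern the whole revision applies: each MPI call compiles only when ESYS_MPI is defined, so a serial build needs neither mpi.h nor an MPI library. Below is a minimal standalone sketch of that pattern; gather_counts and its arguments are illustrative names, not part of paso, and the #else fallback exists only to keep the sketch self-contained (the revision itself simply omits the call in non-MPI builds).

    #ifdef ESYS_MPI
    #include <mpi.h>
    #endif

    /* Illustrative helper: gather one int per rank into recv_buf.
       Only the ESYS_MPI build references MPI symbols. */
    static void gather_counts(int num_couple_cols, int *recv_buf,
                              int *recv_degree, int *recv_offset)
    {
    #ifdef ESYS_MPI
        /* recv_degree/recv_offset say how many values each rank
           contributes and where they land in recv_buf */
        MPI_Allgatherv(&num_couple_cols, 1, MPI_INT,
                       recv_buf, recv_degree, recv_offset,
                       MPI_INT, MPI_COMM_WORLD);
    #else
        /* serial build: a single "rank", so its count is the whole result */
        (void)recv_degree; (void)recv_offset;
        recv_buf[0] = num_couple_cols;
    #endif
    }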

# Line 108 (v.3827) / Line 110 (v.3828)   void Paso_SystemMatrix_copyRemoteCoupleB
   cols_array = TMPMEMALLOC(len, index_t);
   if (Esys_checkPtr(cols_array)) fprintf(stderr, "rank %d MALLOC has trouble\n", rank);

+  #ifdef ESYS_MPI
   MPI_Allgatherv(global_id, num_couple_cols, MPI_INT, cols_array, recv_degree, recv_offset, MPI_INT, A->mpi_info->comm);
+  #endif

   /* first, prepare the ptr_ptr to be received */
   ptr_ptr = MEMALLOC(overlapped_n+1, index_t);
   for (p=0; p<recv->numNeighbors; p++) {
     row = recv->offsetInShared[p];
     i = recv->offsetInShared[p+1];
+    #ifdef ESYS_MPI
     MPI_Irecv(&(ptr_ptr[row]), i-row, MPI_INT, recv->neighbor[p],
         A->mpi_info->msg_tag_counter+recv->neighbor[p],
         A->mpi_info->comm,
         &(A->row_coupler->mpi_requests[p]));
+    #endif
   }

   /* now prepare the rows to be sent (the degree, the offset and the data) */

# Line 213 (v.3827) / Line 219 (v.3828)   void Paso_SystemMatrix_copyRemoteCoupleB
     }

     /* sending */
+    #ifdef ESYS_MPI
     MPI_Issend(&(send_offset[i0]), i-i0, MPI_INT, send->neighbor[p],
         A->mpi_info->msg_tag_counter+rank,
         A->mpi_info->comm,
         &(A->row_coupler->mpi_requests[p+recv->numNeighbors]));
+    #endif
     send_degree[p] = len;
     i0 = i;
   }

+  #ifdef ESYS_MPI
   MPI_Waitall(A->row_coupler->connector->send->numNeighbors+A->row_coupler->connector->recv->numNeighbors,
       A->row_coupler->mpi_requests,
       A->row_coupler->mpi_stati);
+  #endif
   A->mpi_info->msg_tag_counter += mpi_size;

   len = 0;

# Line 239 (v.3827) / Line 249 (v.3828)   void Paso_SystemMatrix_copyRemoteCoupleB
   j=0;
   for (p=0; p<recv->numNeighbors; p++) {
     i = ptr_ptr[recv->offsetInShared[p+1]] - ptr_ptr[recv->offsetInShared[p]];
+    #ifdef ESYS_MPI
     if (i > 0)
     MPI_Irecv(&(ptr_idx[j]), i, MPI_INT, recv->neighbor[p],
                 A->mpi_info->msg_tag_counter+recv->neighbor[p],
                 A->mpi_info->comm,
                 &(A->row_coupler->mpi_requests[p]));
+    #endif
     j += i;
   }

   j=0;
   for (p=0; p<num_neighbors; p++) {
     i = send_degree[p] - j;
+    #ifdef ESYS_MPI
     if (i > 0)
     MPI_Issend(&(send_idx[j]), i, MPI_INT, send->neighbor[p],
                 A->mpi_info->msg_tag_counter+rank,
                 A->mpi_info->comm,
                 &(A->row_coupler->mpi_requests[p+recv->numNeighbors]));
+    #endif
     j = send_degree[p];
   }

+  #ifdef ESYS_MPI
   MPI_Waitall(A->row_coupler->connector->send->numNeighbors+A->row_coupler->connector->recv->numNeighbors,
                 A->row_coupler->mpi_requests,
                 A->row_coupler->mpi_stati);
+  #endif
   A->mpi_info->msg_tag_counter += mpi_size;

   /* allocate pattern and sparsematrix for remote_coupleBlock */

# Line 275 (v.3827) / Line 291 (v.3828)   void Paso_SystemMatrix_copyRemoteCoupleB
   j=0;
   for (p=0; p<recv->numNeighbors; p++) {
     i = ptr_ptr[recv->offsetInShared[p+1]] - ptr_ptr[recv->offsetInShared[p]];
+    #ifdef ESYS_MPI
     if (i > 0)
     MPI_Irecv(&(A->remote_coupleBlock->val[j]), i * block_size,
         MPI_DOUBLE, recv->neighbor[p],
                 A->mpi_info->msg_tag_counter+recv->neighbor[p],
                 A->mpi_info->comm,
                 &(A->row_coupler->mpi_requests[p]));
+    #endif
     j += (i * block_size);
   }

   j=0;
   for (p=0; p<num_neighbors; p++) {
     i = send_degree[p] - j;
+    #ifdef ESYS_MPI
     if (i > 0)
     MPI_Issend(&(send_buf[j*block_size]), i*block_size, MPI_DOUBLE, send->neighbor[p],
                 A->mpi_info->msg_tag_counter+rank,
                 A->mpi_info->comm,
                 &(A->row_coupler->mpi_requests[p+recv->numNeighbors]));
+    #endif
     j = send_degree[p];
   }

+  #ifdef ESYS_MPI
   MPI_Waitall(A->row_coupler->connector->send->numNeighbors+A->row_coupler->connector->recv->numNeighbors,
                 A->row_coupler->mpi_requests,
                 A->row_coupler->mpi_stati);
+  #endif
   A->mpi_info->msg_tag_counter += mpi_size;

   /* release all temp memory allocation */
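
Each of the three guarded exchanges above has the same shape: one MPI_Irecv per receiving neighbour, one MPI_Issend per sending neighbour, a single MPI_Waitall over both sets of requests, then msg_tag_counter is advanced by mpi_size so the next round uses distinct tags (the receive tag is counter + source rank, the send tag is counter + own rank). The sketch below shows that shape under the assumption of contiguous int payloads; exchange_with_neighbours and its parameter names are illustrative, not paso API.

    #include <stdlib.h>
    #ifdef ESYS_MPI
    #include <mpi.h>

    /* Illustrative only: exchange one int block per neighbour, mirroring the
       Irecv/Issend/Waitall + tag-counter pattern guarded in this revision. */
    static void exchange_with_neighbours(int *recv_buf, const int *recv_count,
                                         const int *recv_rank, int num_recv,
                                         int *send_buf, const int *send_count,
                                         const int *send_rank, int num_send,
                                         int *tag_counter, int mpi_size,
                                         MPI_Comm comm)
    {
        MPI_Request *requests = malloc((size_t)(num_recv+num_send)*sizeof(MPI_Request));
        MPI_Status  *stati    = malloc((size_t)(num_recv+num_send)*sizeof(MPI_Status));
        int p, rank, roff = 0, soff = 0;
        MPI_Comm_rank(comm, &rank);

        /* slots left unused by empty messages must be null requests */
        for (p = 0; p < num_recv + num_send; p++)
            requests[p] = MPI_REQUEST_NULL;

        /* post all receives first; skip empty messages as the file does */
        for (p = 0; p < num_recv; p++) {
            if (recv_count[p] > 0)
                MPI_Irecv(&recv_buf[roff], recv_count[p], MPI_INT, recv_rank[p],
                          *tag_counter + recv_rank[p], comm, &requests[p]);
            roff += recv_count[p];
        }
        /* then the matching synchronous-mode sends */
        for (p = 0; p < num_send; p++) {
            if (send_count[p] > 0)
                MPI_Issend(&send_buf[soff], send_count[p], MPI_INT, send_rank[p],
                           *tag_counter + rank, comm, &requests[num_recv + p]);
            soff += send_count[p];
        }
        MPI_Waitall(num_recv + num_send, requests, stati);
        *tag_counter += mpi_size;   /* next round uses a fresh tag range */

        free(requests);
        free(stati);
    }
    #endif

MPI_Issend is the synchronous-mode nonblocking send: it completes only after the matching receive has been posted, which avoids relying on MPI's internal buffering for the potentially large index and value blocks exchanged here.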
