Contents of /trunk/finley/src/NodeFile_gather.c
Revision 1739 - Fri Aug 29 06:19:53 2008 UTC by gross
File MIME type: text/plain
File size: 9114 byte(s)
Log message: Fix in the MPI mesh reader: the owner needs to be set.



/* $Id$ */

/*******************************************************
*
*  Copyright 2003-2007 by ACceSS MNRF
*  Copyright 2007 by University of Queensland
*
*  http://esscc.uq.edu.au
*  Primary Business: Queensland, Australia
*  Licensed under the Open Software License version 3.0
*  http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/

/**************************************************************
*
*   Finley: Mesh: NodeFile
*
*   Gathers the NodeFile "out" from the NodeFile "in": entry i of
*   "out" receives entry index[i]-min_index of "in" for each i in
*   [0, out->numNodes), provided index[i] lies in the half-open
*   range [min_index, max_index).
*
**************************************************************/
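
/* Illustrative example (added commentary, not part of the original file):
*  with index = {12, 10, 11}, min_index = 10 and max_index = 13, entry i
*  of "out" receives entry index[i]-min_index of "in":
*  out[0] <- in[2], out[1] <- in[0], out[2] <- in[1].
*  Entries with index[i] outside [min_index, max_index) are left unchanged.
*/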

#include "NodeFile.h"

/**************************************************************/

void Finley_NodeFile_gatherEntries(dim_t n, index_t* index, index_t min_index, index_t max_index,
                                   index_t* Id_out, index_t* Id_in,
                                   index_t* Tag_out, index_t* Tag_in,
                                   index_t* globalDegreesOfFreedom_out, index_t* globalDegreesOfFreedom_in,
                                   dim_t numDim, double* Coordinates_out, double* Coordinates_in)
{
   dim_t i;
   register index_t k;
   register const index_t range=max_index-min_index;
   const size_t numDim_size=(size_t)numDim*sizeof(double);

   #pragma omp parallel for private(i,k) schedule(static)
   for (i=0;i<n;i++) {
      k=index[i]-min_index;
      if ((k>=0) && (k<range)) {
         Id_out[i]=Id_in[k];
         Tag_out[i]=Tag_in[k];
         globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
         memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]), &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
      }
   }
}

void Finley_NodeFile_gather(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
{
   index_t min_id, max_id;
   Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
   Finley_NodeFile_gatherEntries(out->numNodes, index, min_id, max_id,
                                 out->Id, in->Id,
                                 out->Tag, in->Tag,
                                 out->globalDegreesOfFreedom, in->globalDegreesOfFreedom,
                                 out->numDim, out->Coordinates, in->Coordinates);
}

void Finley_NodeFile_gather_global(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
{
   index_t min_id, max_id, undefined_node;
   Paso_MPI_rank buffer_rank, dest, source, *distribution=NULL;
   index_t *Id_buffer=NULL, *Tag_buffer=NULL, *globalDegreesOfFreedom_buffer=NULL;
   double* Coordinates_buffer=NULL;
   dim_t p, buffer_len, n;
   char error_msg[100];
   #ifdef PASO_MPI
   MPI_Status status;
   #endif

   /* get the global range of node ids */
   Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
   undefined_node=min_id-1;

   distribution=TMPMEMALLOC(in->MPIInfo->size+1, index_t);

   if ( !Finley_checkPtr(distribution) ) {
      /* distribute the range of node ids */
      buffer_len=Paso_MPIInfo_setDistribution(in->MPIInfo,min_id,max_id,distribution);
      /* allocate buffers */
      Id_buffer=TMPMEMALLOC(buffer_len,index_t);
      Tag_buffer=TMPMEMALLOC(buffer_len,index_t);
      globalDegreesOfFreedom_buffer=TMPMEMALLOC(buffer_len,index_t);
      Coordinates_buffer=TMPMEMALLOC(buffer_len*out->numDim,double);
      if (! (Finley_checkPtr(Id_buffer) || Finley_checkPtr(Tag_buffer) ||
             Finley_checkPtr(globalDegreesOfFreedom_buffer) || Finley_checkPtr(Coordinates_buffer) ) ) {
         /* fill Id_buffer with the undefined_node marker so undefined nodes can be detected later */
         #pragma omp parallel for private(n) schedule(static)
         for (n=0;n<buffer_len;n++) Id_buffer[n]=undefined_node;

         /* fill the buffer by sending portions around in a circle */
         dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
         source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
         buffer_rank=in->MPIInfo->rank;
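         /* Added commentary (not in the original): each rank r owns the
            contiguous id range [distribution[r], distribution[r+1]). In the
            loop below the buffer held by this rank at step p collects the
            entries for the range owned by rank (rank-1-p) mod size, since
            buffer_rank is decremented before the scatter; after size steps
            each buffer has visited every rank once and ends up on the rank
            that owns its id range. */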
         for (p=0; p< in->MPIInfo->size; ++p) {
            if (p>0) { /* the initial send can be skipped */
               #ifdef PASO_MPI
               MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
                                    in->MPIInfo->comm,&status);
               MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
                                    in->MPIInfo->comm,&status);
               MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
                                    in->MPIInfo->comm,&status);
               MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
                                    dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
                                    in->MPIInfo->comm,&status);
               #endif
               in->MPIInfo->msg_tag_counter+=4;
            }
            buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
            Finley_NodeFile_scatterEntries(in->numNodes, in->Id,
                                           distribution[buffer_rank], distribution[buffer_rank+1],
                                           Id_buffer, in->Id,
                                           Tag_buffer, in->Tag,
                                           globalDegreesOfFreedom_buffer, in->globalDegreesOfFreedom,
                                           out->numDim, Coordinates_buffer, in->Coordinates);
         }
         /* now the entries are collected back from the buffers by sending them around the ring once more */
         dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
         source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
         buffer_rank=in->MPIInfo->rank;
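         /* Added commentary (not in the original): in this second loop the
            gather happens before buffer_rank is decremented, so at step p
            this rank reads from the fully assembled buffer covering the id
            range of rank (rank-p) mod size; after size steps every rank has
            seen every portion and all entries of index[] are resolved. */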
         for (p=0; p< in->MPIInfo->size; ++p) {
            Finley_NodeFile_gatherEntries(out->numNodes, index,
                                          distribution[buffer_rank], distribution[buffer_rank+1],
                                          out->Id, Id_buffer,
                                          out->Tag, Tag_buffer,
                                          out->globalDegreesOfFreedom, globalDegreesOfFreedom_buffer,
                                          out->numDim, out->Coordinates, Coordinates_buffer);
            if (p<in->MPIInfo->size-1) { /* the last send can be skipped */
               #ifdef PASO_MPI
               MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
                                    in->MPIInfo->comm,&status);
               MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
                                    in->MPIInfo->comm,&status);
               MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
                                    in->MPIInfo->comm,&status);
               MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
                                    dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
                                    in->MPIInfo->comm,&status);
               #endif
               in->MPIInfo->msg_tag_counter+=4;
            }
            buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
         }
         /* check that all nodes have been set: */
         #pragma omp parallel for private(n) schedule(static)
         for (n=0; n< out->numNodes; ++n) {
            if (out->Id[n] == undefined_node) {
               /* out->Id[n] still holds the undefined_node marker here, so the requested id index[n] is reported */
               sprintf(error_msg,"Finley_NodeFile_gather_global: Node id %d is referenced but is not defined.",index[n]);
               Finley_setError(VALUE_ERROR,error_msg);
            }
         }

      }
      TMPMEMFREE(Id_buffer);
      TMPMEMFREE(Tag_buffer);
      TMPMEMFREE(globalDegreesOfFreedom_buffer);
      TMPMEMFREE(Coordinates_buffer);
   }
   TMPMEMFREE(distribution);
   /* make sure the error status is consistent across all ranks */
   Paso_MPIInfo_noError(in->MPIInfo);
}
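
For reference, both loops above use the same standard ring-exchange pattern: each rank repeatedly passes its buffer to rank+1 and receives from rank-1, so after size-1 exchanges every buffer has visited every rank exactly once. A minimal standalone sketch of that pattern (illustrative only, not part of this file; all names below are hypothetical):

#include <mpi.h>
#include <stdio.h>

#define BUFFER_LEN 4

int main(int argc, char** argv)
{
   int rank, size, p, n, buffer_rank;
   int buffer[BUFFER_LEN];
   MPI_Status status;

   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &size);

   /* each rank starts with its own portion, as in the fill phase above */
   for (n=0; n<BUFFER_LEN; n++) buffer[n]=rank*BUFFER_LEN+n;

   for (p=0; p<size; ++p) {
      /* owner of the portion currently held by this rank */
      buffer_rank=(rank-p+size)%size;
      printf("step %d: rank %d holds the portion of rank %d\n", p, rank, buffer_rank);
      /* ... process the portion of buffer_rank here ... */
      if (p<size-1) { /* the last send can be skipped */
         MPI_Sendrecv_replace(buffer, BUFFER_LEN, MPI_INT,
                              (rank+1)%size, 0, (rank-1+size)%size, 0,
                              MPI_COMM_WORLD, &status);
      }
   }
   MPI_Finalize();
   return 0;
}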

Properties

svn:eol-style : native
svn:keywords  : Author Date Id Revision
