/[escript]/trunk/finley/src/Mesh_optimizeDOFDistribution.c
Revision 1552 - Thu May 8 08:52:41 2008 UTC by gross
some changes to make the implementation of an upwind MPI version easier
/* $Id$ */

/*******************************************************
 *
 *       Copyright 2003-2007 by ACceSS MNRF
 *       Copyright 2007 by University of Queensland
 *
 *                http://esscc.uq.edu.au
 *        Primary Business: Queensland, Australia
 *  Licensed under the Open Software License version 3.0
 *     http://www.opensource.org/licenses/osl-3.0.php
 *
 *******************************************************/
/**************************************************************/

/* Finley: Mesh: optimizes the distribution of DOFs across processors */
/* using ParMETIS. On return a new distribution is given and the globalDOF are relabeled */
/* accordingly, but the mesh has not been redistributed yet */

/**************************************************************/
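/* Note (added, illustrative): distribution[] follows the usual first-DOF-per-rank
   convention: rank p owns the global DOFs distribution[p] <= d < distribution[p+1],
   and distribution[mpiSize] is the global number of DOFs. For example, with
   mpiSize=3 and distribution={0,4,8,12}, rank 1 owns DOFs 4..7. On return the
   array holds the new, optimized distribution. */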
23    
24     #include "Mesh.h"
25     #include "IndexList.h"
26     #ifdef _OPENMP
27     #include <omp.h>
28     #endif
29 ksteube 1459 #ifdef PARMETIS
30     #include "parmetis.h"
31     #endif
32 ksteube 1315
/**************************************************************/

void Finley_Mesh_optimizeDOFDistribution(Finley_Mesh* in, dim_t *distribution) {

   dim_t dim, i, j, k, myNumVertices, p, mpiSize, len, globalNumVertices;
   dim_t *partition_count=NULL, *new_distribution=NULL, *loc_partition_count=NULL;
   bool_t *setNewDOFId=NULL;
   index_t myFirstVertex, myLastVertex, firstVertex, lastVertex, *newGlobalDOFID=NULL;
   size_t mpiSize_size;
   index_t *partition=NULL;
   Paso_Pattern *pattern=NULL;
   Paso_MPI_rank myRank, dest, source, current_rank, rank;
   Finley_IndexList* index_list=NULL;
   float *xyz=NULL;
   int c;

   #ifdef PASO_MPI
   MPI_Status status;
   #endif
   if (in==NULL) return;
   if (in->Nodes == NULL) return;

   myRank=in->MPIInfo->rank;
   mpiSize=in->MPIInfo->size;
   mpiSize_size=mpiSize*sizeof(dim_t);
   dim=in->Nodes->numDim;

   /* first step is to distribute the elements according to the global numbering of the DOFs */

   myFirstVertex=distribution[myRank];
   myLastVertex=distribution[myRank+1];
   myNumVertices=myLastVertex-myFirstVertex;
   globalNumVertices=distribution[mpiSize];
   len=0;
   for (p=0;p<mpiSize;++p) len=MAX(len,distribution[p+1]-distribution[p]);
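   /* Note (added, illustrative): len is the largest block of vertices owned by any
      single rank; it sizes the buffers that are later shifted around the process
      ring. E.g. distribution={0,4,8,12} gives len=4 and, on rank 1,
      myFirstVertex=4, myLastVertex=8, myNumVertices=4. */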
   partition=TMPMEMALLOC(len,index_t);   /* len is used for the buffers sent around the ring later on */
   xyz=TMPMEMALLOC(myNumVertices*dim,float);
   partition_count=TMPMEMALLOC(mpiSize+1,dim_t);
   new_distribution=TMPMEMALLOC(mpiSize+1,dim_t);
   newGlobalDOFID=TMPMEMALLOC(len,index_t);
   setNewDOFId=TMPMEMALLOC(in->Nodes->numNodes,bool_t);
   if (!(Finley_checkPtr(partition) || Finley_checkPtr(xyz) || Finley_checkPtr(partition_count) || Finley_checkPtr(new_distribution) || Finley_checkPtr(newGlobalDOFID) || Finley_checkPtr(setNewDOFId))) {
      dim_t *recvbuf=TMPMEMALLOC(mpiSize*mpiSize,dim_t);
      /* set the coordinates: */
      /* it is assumed that at least one node on this processor provides a coordinate */
      #pragma omp parallel for private(i,j,k)
      for (i=0;i<in->Nodes->numNodes;++i) {
         k=in->Nodes->globalDegreesOfFreedom[i]-myFirstVertex;
         if ((k>=0) && (k<myNumVertices)) {
            for (j=0;j<dim;++j) xyz[k*dim+j]=(float)(in->Nodes->Coordinates[INDEX2(j,i,dim)]);
         }
      }
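      /* Note (added, illustrative): k is the local index of a DOF owned by this rank
         (0 <= k < myNumVertices); nodes whose DOF falls outside
         [myFirstVertex,myLastVertex) are skipped. ParMETIS expects one dim-tuple of
         coordinates per locally owned vertex, stored contiguously in xyz. */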

      index_list=TMPMEMALLOC(myNumVertices,Finley_IndexList);
      /* ksteube CSR of DOF IDs */
      /* create the adjacency structure xadj and adjncy */
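      /* Note (added, illustrative): "CSR" is the compressed sparse row layout of the
         distributed graph that ParMETIS consumes: xadj[i]..xadj[i+1] delimits the
         slice of adjncy listing the neighbours of local vertex i. E.g. a chain
         0-1-2 has xadj={0,1,3,4} and adjncy={1, 0,2, 1}. */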
      if (! Finley_checkPtr(index_list)) {
         #pragma omp parallel private(i)
         {
            #pragma omp for schedule(static)
            for(i=0;i<myNumVertices;++i) {
               index_list[i].extension=NULL;
               index_list[i].n=0;
            }
            /* ksteube build CSR format */
            /* insert contributions from element matrices into the column index list index_list: */
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->Elements, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->FaceElements, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->ContactElements, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->Points, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
         }

         /* create the local matrix pattern */
         pattern=Finley_IndexList_createPattern(0,myNumVertices,index_list,0,globalNumVertices,0);
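         /* Note (added): pattern->ptr and pattern->index are the CSR arrays built
            from index_list; they are passed to ParMETIS_V3_PartGeomKway below as
            xadj and adjncy respectively. */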

         /* clean up index list */
         if (index_list!=NULL) {
            #pragma omp parallel for private(i)
            for(i=0;i<myNumVertices;++i) Finley_IndexList_free(index_list[i].extension);
         }

         if (Finley_noError()) {

            #ifdef PARMETIS
            if (in->MPIInfo->size>1) {
               int i;
               int wgtflag = 0;
               int numflag = 0;   /* pattern->ptr is C style: starting from 0 instead of 1 */
               int ncon = 1;
               int edgecut;
               int options[2];
               float *tpwgts = TMPMEMALLOC(ncon*mpiSize,float);
               float *ubvec = TMPMEMALLOC(ncon,float);
               for (i=0; i<ncon*mpiSize; i++) tpwgts[i] = 1.0/(float)mpiSize;
               for (i=0; i<ncon; i++) ubvec[i] = 1.05;
               options[0] = 0;
               options[1] = 15;
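               /* Note (added, illustrative): wgtflag=0 supplies neither vertex nor
                  edge weights; numflag=0 selects C-style numbering; tpwgts asks for
                  an equal vertex share per partition and ubvec[0]=1.05 tolerates 5%
                  load imbalance per constraint; options[0]=0 makes ParMETIS use its
                  defaults, so options[1] (debug level) is ignored. */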
               ParMETIS_V3_PartGeomKway(distribution,
                                        pattern->ptr,
                                        pattern->index,
                                        NULL,
                                        NULL,
                                        &wgtflag,
                                        &numflag,
                                        &dim,
                                        xyz,
                                        &ncon,
                                        &mpiSize,
                                        tpwgts,
                                        ubvec,
                                        options,
                                        &edgecut,
                                        partition,   /* new CPU ownership of elements */
                                        &(in->MPIInfo->comm));
               printf("ParMETIS number of edges cut by partitioning: %d\n", edgecut);
               TMPMEMFREE(ubvec);
               TMPMEMFREE(tpwgts);
            } else {
               for (i=0;i<myNumVertices;++i) partition[i]=0;   /* CPU 0 owns all vertices */
            }
            #else
            for (i=0;i<myNumVertices;++i) partition[i]=myRank;   /* without ParMETIS each CPU keeps its own vertices */
            #endif

         }

         Paso_Pattern_free(pattern);

         /* create a new distribution and labeling of the DOFs */
         memset(new_distribution,0,mpiSize_size);
         #pragma omp parallel private(loc_partition_count)
         {
            loc_partition_count=THREAD_MEMALLOC(mpiSize,dim_t);
            memset(loc_partition_count,0,mpiSize_size);
            #pragma omp for private(i)
            for (i=0;i<myNumVertices;++i) loc_partition_count[partition[i]]++;
            #pragma omp critical
            {
               for (i=0;i<mpiSize;++i) new_distribution[i]+=loc_partition_count[i];
            }
            THREAD_MEMFREE(loc_partition_count);
         }
         #ifdef PASO_MPI
         /* recvbuf will be the concatenation of each CPU's contribution to new_distribution */
         MPI_Allgather(new_distribution, mpiSize, MPI_INT, recvbuf, mpiSize, MPI_INT, in->MPIInfo->comm);
         #else
         for (i=0;i<mpiSize;++i) recvbuf[i]=new_distribution[i];
         #endif
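         /* Note (added, illustrative): recvbuf[q*mpiSize+r] now holds the number of
            vertices CPU q assigned to partition r. The loop below converts these
            counts into a contiguous new global numbering: for each target partition
            `rank`, c starts at the total contributed by CPUs below myRank, so this
            CPU's vertices occupy a disjoint slot of partition `rank`'s index block,
            and new_distribution[rank+1] accumulates the full block size. */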
         new_distribution[0]=0;
         for (rank=0; rank<mpiSize; rank++) {
            c=0;
            for (i=0;i<myRank;++i) c+=recvbuf[rank+mpiSize*i];
            for (i=0;i<myNumVertices;++i) {
               if (rank==partition[i]) {
                  newGlobalDOFID[i]=new_distribution[rank]+c;
                  c++;
               }
            }
            for (i=myRank+1;i<mpiSize;++i) c+=recvbuf[rank+mpiSize*i];
            new_distribution[rank+1]=new_distribution[rank]+c;
         }
         TMPMEMFREE(recvbuf);

         /* now the overlap needs to be created by sending the new DOF numbering around */

         dest=Paso_MPIInfo_mod(mpiSize, myRank + 1);
         source=Paso_MPIInfo_mod(mpiSize, myRank - 1);
         current_rank=myRank;
         #pragma omp parallel for private(i)
         for (i=0;i<in->Nodes->numNodes;++i) setNewDOFId[i]=TRUE;

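         /* Note (added, illustrative): ring communication. In step p every CPU holds
            the newGlobalDOFID block that originated on CPU current_rank and relabels
            the DOFs it sees from that block (each DOF only once, guarded by
            setNewDOFId). The block is then shifted one CPU along the ring; after
            mpiSize-1 shifts every CPU has seen every block, so overlapping DOFs end
            up with consistent new numbers. */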
         for (p=0; p<mpiSize; ++p) {

            firstVertex=distribution[current_rank];
            lastVertex=distribution[current_rank+1];
            #pragma omp parallel for private(i,j,k)
            for (i=0;i<in->Nodes->numNodes;++i) {
               k=in->Nodes->globalDegreesOfFreedom[i];
               if (setNewDOFId[i] && (firstVertex<=k) && (k<lastVertex)) {
                  in->Nodes->globalDegreesOfFreedom[i]=newGlobalDOFID[k-firstVertex];
                  setNewDOFId[i]=FALSE;
               }
            }

            if (p<mpiSize-1) {   /* the final send can be skipped */
               #ifdef PASO_MPI
               MPI_Sendrecv_replace(newGlobalDOFID, len, MPI_INT,
                                    dest, in->MPIInfo->msg_tag_counter,
                                    source, in->MPIInfo->msg_tag_counter,
                                    in->MPIInfo->comm, &status);
               #endif
               in->MPIInfo->msg_tag_counter++;
               current_rank=Paso_MPIInfo_mod(mpiSize, current_rank-1);
            }
         }
         for (i=0;i<mpiSize+1;++i) distribution[i]=new_distribution[i];

      }
      TMPMEMFREE(index_list);
   }
   TMPMEMFREE(newGlobalDOFID);
   TMPMEMFREE(setNewDOFId);
   TMPMEMFREE(new_distribution);
   TMPMEMFREE(partition_count);
   TMPMEMFREE(partition);
   TMPMEMFREE(xyz);
   return;
}
