
Contents of /trunk-mpi-branch/finley/src/Mesh_optimizeDOFDistribution.c



Revision 1226
Fri Aug 3 08:00:07 2007 UTC by gross
File MIME type: text/plain
File size: 5048 byte(s)
hook for ParMETIS added; still needs some work
/*
 ************************************************************
 *          Copyright 2007 by ACcESS MNRF                   *
 *                                                          *
 *              http://www.access.edu.au                    *
 *       Primary Business: Queensland, Australia            *
 *  Licensed under the Open Software License version 3.0    *
 *  http://www.opensource.org/licenses/osl-3.0.php          *
 *                                                          *
 ************************************************************
*/

/**************************************************************/

/* Finley: Mesh: optimizes the distribution of DOFs across    */
/* processors using ParMETIS. On return mpiRankOfDOF (which   */
/* includes the overlap of the current distribution) gives    */
/* the new processor rank assigned to each DOF. distribution  */
/* specifies the current distribution of DOFs, i.e. rank p    */
/* owns DOFs distribution[p]..distribution[p+1]-1.            */

/**************************************************************/

/* Author: gross@access.edu.au */
/* Version: $Id$ */

/**************************************************************/

#include "Mesh.h"

/**************************************************************/

void Finley_Mesh_optimizeDOFDistribution(Finley_Mesh* in, dim_t* distribution, Paso_MPI_rank* mpiRankOfDOF) {
   dim_t dim, i, j, k, myNumVertices, p, mpiSize, len;
   index_t myFirstVertex, myLastVertex, firstVertex, lastVertex;
   index_t* partition=NULL;
   Paso_MPI_rank myRank, dest, source, current_rank;
   float* xyz=NULL;
   #ifdef PASO_MPI
   MPI_Status status;
   #endif

   if (in==NULL) return;
   if (in->Nodes==NULL) return;

   myRank=in->MPIInfo->rank;
   mpiSize=in->MPIInfo->size;
   dim=in->Nodes->numDim;
   /* first step is to distribute the elements according to a global numbering of the DOFs */

   myFirstVertex=distribution[myRank];
   myLastVertex=distribution[myRank+1];
   myNumVertices=myLastVertex-myFirstVertex;
   len=0;
   for (p=0; p<mpiSize; ++p) len=MAX(len, distribution[p+1]-distribution[p]);
   partition=TMPMEMALLOC(len, index_t); /* len is used for the sending around of partition later on */
   xyz=TMPMEMALLOC(myNumVertices*dim, float);
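   /* note: len is the maximum chunk size over all ranks, so the single
      partition buffer can hold any rank's chunk during the ring pass at
      the end of this function; xyz collects the coordinates of the DOFs
      owned by this rank, one dim-tuple per DOF, as ParMETIS expects */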
   if (!(Finley_checkPtr(partition) || Finley_checkPtr(xyz))) {

      /* set the coordinates: */
      /* it is assumed that at least one node on this processor provides a coordinate */
      #pragma omp parallel for private(i,j,k)
      for (i=0; i<in->Nodes->numNodes; ++i) {
         k=in->Nodes->globalDegreesOfFreedom[i]-myFirstVertex;
         if ((k>=0) && (k<myNumVertices)) {
            for (j=0; j<dim; ++j) xyz[k*dim+j]=(float)(in->Nodes->Coordinates[INDEX2(j,i,dim)]);
         }
      }

      /* the hook for the ParMETIS call; the graph arrays xadj and adjncy
         and the arguments marked with + still need to be supplied:

         ParMETIS_V3_PartGeomKway(distribution,
                                  xadj,
                                  adjncy,
                                  idxtype *vwgt, +
                                  idxtype *adjwgt, +
                                  int *wgtflag, +
                                  int *numflag, +
                                  dim,
                                  xyz,
                                  int *ncon, +
                                  mpiSize,
                                  float *tpwgts, +
                                  float *ubvec, +
                                  int *options, +
                                  int *edgecut, +
                                  partition,
                                  in->MPIInfo->comm);
      */
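      /* A minimal sketch (an assumption, not part of this revision) of how
         the hook above might be completed once xadj/adjncy have been built
         from the mesh connectivity and converted to ParMETIS' idxtype:
         an unweighted graph, one balance constraint, uniform target
         weights, 5% imbalance tolerance and default options:

         int ndims=(int)dim, nparts=(int)mpiSize;
         int wgtflag=0, numflag=0, ncon=1, edgecut=0, options[3]={0,0,0};
         float ubvec[1]={1.05f};
         float* tpwgts=TMPMEMALLOC(nparts, float);
         for (p=0; p<nparts; ++p) tpwgts[p]=1.0f/(float)nparts;
         ParMETIS_V3_PartGeomKway(distribution, xadj, adjncy,
                                  NULL, NULL, &wgtflag, &numflag,
                                  &ndims, xyz, &ncon, &nparts,
                                  tpwgts, ubvec, options, &edgecut,
                                  partition, &(in->MPIInfo->comm));
         TMPMEMFREE(tpwgts);
      */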
      for (i=0; i<myNumVertices; ++i) partition[i]=myRank; /* placeholder: remove once the ParMETIS call above is active */

      /* now the overlap needs to be created by sending the partition around */

      dest=Paso_MPIInfo_mod(mpiSize, myRank + 1);
      source=Paso_MPIInfo_mod(mpiSize, myRank - 1);
      current_rank=myRank;
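      /* ring pass: in step p this rank holds the partition array of
         current_rank; it tags every local DOF owned by current_rank with
         its new rank, then shifts the array to the neighbouring rank, so
         that after mpiSize steps every rank has processed every partition
         array */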
      for (p=0; p<mpiSize; ++p) {

         firstVertex=distribution[current_rank];
         lastVertex=distribution[current_rank+1];
         #pragma omp parallel for private(i,k)
         for (i=0; i<in->Nodes->numNodes; ++i) {
            k=in->Nodes->globalDegreesOfFreedom[i];
            if ((firstVertex<=k) && (k<lastVertex)) mpiRankOfDOF[i]=partition[k-firstVertex];
         }

         if (p<mpiSize-1) { /* the final send can be skipped */
            #ifdef PASO_MPI
            MPI_Sendrecv_replace(partition, len, MPI_INT,
                                 dest, in->MPIInfo->msg_tag_counter,
                                 source, in->MPIInfo->msg_tag_counter,
                                 in->MPIInfo->comm, &status);
            #endif
            in->MPIInfo->msg_tag_counter++;
            current_rank=Paso_MPIInfo_mod(mpiSize, current_rank-1);
         }
      }
   }
   TMPMEMFREE(partition);
   TMPMEMFREE(xyz);
   return;
}
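
/* A hypothetical usage sketch (not part of this file): redistribute the
   DOFs of an existing mesh `in` and inspect the new ownership; filling
   distribution[] is application-specific and only indicated here:

   dim_t mpiSize=in->MPIInfo->size;
   dim_t* distribution=TMPMEMALLOC(mpiSize+1, dim_t);
   Paso_MPI_rank* mpiRankOfDOF=TMPMEMALLOC(in->Nodes->numNodes, Paso_MPI_rank);
   ... fill distribution[] with the current DOF distribution ...
   Finley_Mesh_optimizeDOFDistribution(in, distribution, mpiRankOfDOF);
   if (Finley_noError()) {
      ... mpiRankOfDOF[i] now holds the new owner of the DOF at node i ...
   }
   TMPMEMFREE(mpiRankOfDOF);
   TMPMEMFREE(distribution);
*/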
