Contents of /trunk-mpi-branch/finley/src/Mesh_optimizeDOFLabeling.c


Revision 1293
Fri Sep 7 01:10:05 2007 UTC by gross
File MIME type: text/plain
File size: 5924 byte(s)
Finally I managed to add a DOF relabeling algorithm. It is very simple at this stage. It runs under MPI,
where it looks at the part of the stiffness matrix whose rows and columns (the mainBlock) are controlled by one processor.
The local optimization is then distributed and applied in the coupleBlock matrix.
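The actual bandwidth reduction is delegated to Paso_Pattern_reduceBandwidth, which is not part of this file. As a rough illustration of what such a relabeling does, here is a minimal Cuthill-McKee-style sketch over a symmetric CSR graph (ptr/index arrays). It is a hypothetical stand-in, not the Paso implementation: a full Cuthill-McKee ordering would also start from a low-degree vertex and visit neighbours in order of increasing degree, which is omitted here for brevity.

/* illustration only: breadth-first relabeling of a symmetric CSR graph;
   neighbouring vertices receive nearby new labels, which pulls the
   non-zeros of the reordered matrix towards the diagonal */
#include <stdlib.h>

void relabel_bfs(int n, const int* ptr, const int* index, int* newLabel) {
   int* queue = malloc(n * sizeof(int));
   int head = 0, tail = 0, i, j, next = 0;
   for (i = 0; i < n; ++i) newLabel[i] = -1;   /* -1 marks unvisited */
   for (i = 0; i < n; ++i) {                   /* seed every connected component */
      if (newLabel[i] >= 0) continue;
      newLabel[i] = next++;
      queue[tail++] = i;
      while (head < tail) {                    /* breadth-first sweep */
         int v = queue[head++];
         for (j = ptr[v]; j < ptr[v+1]; ++j) {
            int w = index[j];
            if (newLabel[w] < 0) {
               newLabel[w] = next++;
               queue[tail++] = w;
            }
         }
      }
   }
   free(queue);
}

In the routine below, newGlobalDOFID plays the role of newLabel for the locally owned block, before the myFirstVertex shift turns it into a global id.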



/*
 ************************************************************
 *          Copyright 2007 by ACcESS MNRF                   *
 *                                                          *
 *              http://www.access.edu.au                    *
 *       Primary Business: Queensland, Australia            *
 *  Licensed under the Open Software License version 3.0    *
 *     http://www.opensource.org/licenses/osl-3.0.php       *
 *                                                          *
 ************************************************************
 */

/************************************************************************/

/*  Finley: Mesh: optimizes the labeling of the DOFs on each processor  */

/************************************************************************/

/*  Author: gross@access.edu.au */
/*  Version: $Id$ */

/**************************************************************/

#include "Mesh.h"
#include "IndexList.h"

/**************************************************************/

void Finley_Mesh_optimizeDOFLabeling(Finley_Mesh* in, dim_t* distribution) {

   index_t myFirstVertex, myLastVertex, *newGlobalDOFID=NULL, firstVertex, lastVertex;
   register index_t k;
   dim_t mpiSize, myNumVertices, len, p, i;
   Paso_Pattern *pattern=NULL;
   Paso_MPI_rank myRank, dest, source, current_rank;
   Finley_IndexList* index_list=NULL;
   #ifdef PASO_MPI
   MPI_Status status;
   #endif

   if (in==NULL) return;
   if (in->Nodes == NULL) return;

   myRank=in->MPIInfo->rank;
   mpiSize=in->MPIInfo->size;
   myFirstVertex=distribution[myRank];
   myLastVertex=distribution[myRank+1];
   myNumVertices=myLastVertex-myFirstVertex;
   len=0;
   for (p=0; p<mpiSize; ++p) len=MAX(len, distribution[p+1]-distribution[p]);

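   /* len is the largest number of vertices owned by any rank, so the buffer
      newGlobalDOFID can hold any processor's relabeling as it travels the
      ring of processors below */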
   index_list=TMPMEMALLOC(myNumVertices, Finley_IndexList);
   newGlobalDOFID=TMPMEMALLOC(len, index_t);
   /* create the adjacency structure xadj and adjncy */
   if (! ( Finley_checkPtr(index_list) || Finley_checkPtr(newGlobalDOFID) ) ) {
      #pragma omp parallel private(i)
      {
         #pragma omp for schedule(static)
         for (i=0; i<myNumVertices; ++i) {
            index_list[i].extension=NULL;
            index_list[i].n=0;
         }
         /* insert the contributions of the element matrices into the column index list index_list: */
         Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                     in->Elements, in->Nodes->globalDegreesOfFreedom,
                                                     in->Nodes->globalDegreesOfFreedom);
         Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                     in->FaceElements, in->Nodes->globalDegreesOfFreedom,
                                                     in->Nodes->globalDegreesOfFreedom);
         Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                     in->ContactElements, in->Nodes->globalDegreesOfFreedom,
                                                     in->Nodes->globalDegreesOfFreedom);
         Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                     in->Points, in->Nodes->globalDegreesOfFreedom,
                                                     in->Nodes->globalDegreesOfFreedom);
      }
      /* create the local matrix pattern; the -myFirstVertex offset maps the
         global indices of the owned block onto the local range [0,myNumVertices) */
      pattern=Finley_IndexList_createPattern(myNumVertices, index_list, myFirstVertex, myLastVertex, -myFirstVertex);

      /* clean up the index list */
      if (index_list!=NULL) {
         #pragma omp parallel for private(i)
         for (i=0; i<myNumVertices; ++i) Finley_IndexList_free(index_list[i].extension);
      }

      if (Finley_noError()) Paso_Pattern_reduceBandwidth(pattern, newGlobalDOFID);

      Paso_Pattern_free(pattern);
   }
   Paso_MPIInfo_noError(in->MPIInfo);
   if (Finley_noError()) {
      /* shift the new labeling to create a global id */
      #pragma omp parallel for private(i)
      for (i=0; i<myNumVertices; ++i) newGlobalDOFID[i]+=myFirstVertex;

      /* distribute the new labeling to the other processors */
      dest=Paso_MPIInfo_mod(mpiSize, myRank + 1);
      source=Paso_MPIInfo_mod(mpiSize, myRank - 1);
      current_rank=myRank;
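      /* ring pass: in each of the mpiSize rounds the buffer newGlobalDOFID holds
         the relabeling computed by current_rank; it is applied to the local node
         table, then forwarded to rank myRank+1 while the next block arrives from
         rank myRank-1, so current_rank steps backwards through the ranks */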
      for (p=0; p<mpiSize; ++p) {
         firstVertex=distribution[current_rank];
         lastVertex=distribution[current_rank+1];
         #pragma omp parallel for private(i,k)
         for (i=0; i<in->Nodes->numNodes; ++i) {
            k=in->Nodes->globalDegreesOfFreedom[i];
            if ( (firstVertex<=k) && (k<lastVertex) ) {
               in->Nodes->globalDegreesOfFreedom[i]=newGlobalDOFID[k-firstVertex];
            }
         }

         if (p<mpiSize-1) { /* the final send can be skipped */
            #ifdef PASO_MPI
            MPI_Sendrecv_replace(newGlobalDOFID, len, MPI_INT,
                                 dest, in->MPIInfo->msg_tag_counter,
                                 source, in->MPIInfo->msg_tag_counter,
                                 in->MPIInfo->comm, &status);
            #endif
            in->MPIInfo->msg_tag_counter++;
            current_rank=Paso_MPIInfo_mod(mpiSize, current_rank-1);
         }
      }
   }
   TMPMEMFREE(index_list);
   TMPMEMFREE(newGlobalDOFID);
   #if 0
   for (i=0; i<in->Nodes->numNodes; ++i) printf("%d ", in->Nodes->globalDegreesOfFreedom[i]);
   printf("\n");
   #endif
   return;
}
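For reference, the ring exchange used above can be reproduced in isolation. The following self-contained sketch (illustrative values only, not part of finley) shows the same MPI_Sendrecv_replace pattern: the buffer travels to rank+1, a new block arrives from rank-1, and each rank tracks whose data it currently holds.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
   int rank, size, buffer, current_rank, p;
   MPI_Status status;
   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   buffer = 100 + rank;           /* stand-in for this rank's newGlobalDOFID */
   current_rank = rank;           /* owner of the data currently in the buffer */
   for (p = 0; p < size; ++p) {
      printf("rank %d holds the data of rank %d: %d\n", rank, current_rank, buffer);
      if (p < size - 1) {         /* the final send can be skipped */
         MPI_Sendrecv_replace(&buffer, 1, MPI_INT,
                              (rank + 1) % size, 0,
                              (rank - 1 + size) % size, 0,
                              MPI_COMM_WORLD, &status);
         current_rank = (current_rank - 1 + size) % size;
      }
   }
   MPI_Finalize();
   return 0;
}

After p rounds a rank holds the block of rank myRank-p (mod size), which is exactly the order in which the relabelings are applied in the loop above.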
