escript: /trunk/finley/src/Mesh_optimizeDOFLabeling.c

Revision 1315
Tue Sep 25 02:41:13 2007 UTC by ksteube
File MIME type: text/plain
File size: 5672 byte(s)
Copied more files from MPI branch to trunk

/* $Id$ */

/*******************************************************
 *
 *  Copyright 2003-2007 by ACceSS MNRF
 *  Copyright 2007 by University of Queensland
 *
 *  http://esscc.uq.edu.au
 *  Primary Business: Queensland, Australia
 *  Licensed under the Open Software License version 3.0
 *  http://www.opensource.org/licenses/osl-3.0.php
 *
 *******************************************************/

/************************************************************************/

/*  Finley: Mesh: optimizes the labeling of the DOFs on each processor  */

/************************************************************************/
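
/*
 * Overview (a summary of the code below): the relabeling runs in three
 * steps. First, the connectivity pattern of the DOFs owned by this rank
 * is assembled from all element types of the mesh (Elements, FaceElements,
 * ContactElements, Points). Second, Paso_Pattern_reduceBandwidth computes
 * a bandwidth-reducing renumbering of that local pattern (the reordering
 * algorithm itself is implemented in Paso, not in this file). Third, each
 * rank's block of new labels is circulated around all MPI ranks in a ring
 * so that every rank can update the global DOF ids of the nodes it holds.
 */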

#include "Mesh.h"
#include "IndexList.h"

/**************************************************************/

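/*
 * Interface note (inferred from the code below rather than from separate
 * documentation): 'in' is the mesh whose node-wise globalDegreesOfFreedom
 * are relabeled in place; 'distribution' has mpiSize+1 entries, with rank p
 * owning the global DOF range [distribution[p], distribution[p+1]).
 */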
void Finley_Mesh_optimizeDOFLabeling(Finley_Mesh* in, dim_t* distribution) {

    index_t myFirstVertex, myLastVertex, *newGlobalDOFID=NULL, firstVertex, lastVertex;
    register index_t k;
    dim_t mpiSize, myNumVertices, len, p, i;
    Paso_Pattern *pattern=NULL;
    Paso_MPI_rank myRank, dest, source, current_rank;
    Finley_IndexList* index_list=NULL;
    #ifdef PASO_MPI
    MPI_Status status;
    #endif

    if (in == NULL) return;
    if (in->Nodes == NULL) return;

    myRank = in->MPIInfo->rank;
    mpiSize = in->MPIInfo->size;
    myFirstVertex = distribution[myRank];
    myLastVertex = distribution[myRank+1];
    myNumVertices = myLastVertex - myFirstVertex;
    len = 0;
    for (p=0; p<mpiSize; ++p) len = MAX(len, distribution[p+1]-distribution[p]);
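
    /* Note: len is the largest number of DOFs owned by any single rank, so
     * one buffer of length len can hold whichever rank's label block is
     * passing through during the ring exchange further below. */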

    index_list = TMPMEMALLOC(myNumVertices, Finley_IndexList);
    newGlobalDOFID = TMPMEMALLOC(len, index_t);
    /* create the adjacency structure (the column index lists) */
    if (!(Finley_checkPtr(index_list) || Finley_checkPtr(newGlobalDOFID))) {
        #pragma omp parallel private(i)
        {
            #pragma omp for schedule(static)
            for (i=0; i<myNumVertices; ++i) {
                index_list[i].extension = NULL;
                index_list[i].n = 0;
            }
            /* insert contributions from element matrices into the column index list index_list: */
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->Elements, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->FaceElements, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->ContactElements, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
            Finley_IndexList_insertElementsWithRowRange(index_list, myFirstVertex, myLastVertex,
                                                        in->Points, in->Nodes->globalDegreesOfFreedom,
                                                        in->Nodes->globalDegreesOfFreedom);
        }
        /* create the local matrix pattern */
        pattern = Finley_IndexList_createPattern(myNumVertices, index_list, myFirstVertex, myLastVertex, -myFirstVertex);
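        /* Note: the last argument (-myFirstVertex) shifts the global column
         * indices into the local range [0, myNumVertices), so the pattern
         * describes a purely local graph for the reordering below. */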

        /* clean up index list */
        if (index_list != NULL) {
            #pragma omp parallel for private(i)
            for (i=0; i<myNumVertices; ++i) Finley_IndexList_free(index_list[i].extension);
        }

        if (Finley_noError()) Paso_Pattern_reduceBandwidth(pattern, newGlobalDOFID);
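        /* After this call newGlobalDOFID[i] holds the new 0-based local
         * number of local DOF i under a bandwidth-reducing permutation of
         * the pattern (Paso_Pattern_reduceBandwidth lives in Paso;
         * presumably a Cuthill-McKee-style reordering, but the algorithm is
         * not shown in this file). */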

        Paso_Pattern_free(pattern);
    }
    Paso_MPIInfo_noError(in->MPIInfo);
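    /* The call above reconciles the error state across all ranks, so every
     * rank takes the same branch below even if only one of them failed. */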
    if (Finley_noError()) {
        /* shift new labeling to create a global id */
        #pragma omp parallel for private(i)
        for (i=0; i<myNumVertices; ++i) newGlobalDOFID[i] += myFirstVertex;
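        /* newGlobalDOFID now maps old global ids (offset by -myFirstVertex)
         * from the block [myFirstVertex, myLastVertex) to new global ids in
         * that same block. */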

        /* distribute new labeling to other processors */
        dest = Paso_MPIInfo_mod(mpiSize, myRank + 1);
        source = Paso_MPIInfo_mod(mpiSize, myRank - 1);
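        /* Ring exchange: in each of the mpiSize passes this rank applies
         * the label block currently held in newGlobalDOFID (the block of
         * current_rank), then forwards it to rank+1 and receives the block
         * of rank-1, so every rank sees every block exactly once. */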
        current_rank = myRank;
        for (p=0; p<mpiSize; ++p) {
            firstVertex = distribution[current_rank];
            lastVertex = distribution[current_rank+1];
            #pragma omp parallel for private(i,k)
            for (i=0; i<in->Nodes->numNodes; ++i) {
                k = in->Nodes->globalDegreesOfFreedom[i];
                if ((firstVertex <= k) && (k < lastVertex)) {
                    in->Nodes->globalDegreesOfFreedom[i] = newGlobalDOFID[k-firstVertex];
                }
            }

            if (p < mpiSize-1) { /* the final send can be skipped */
                #ifdef PASO_MPI
                MPI_Sendrecv_replace(newGlobalDOFID, len, MPI_INT,
                                     dest, in->MPIInfo->msg_tag_counter,
                                     source, in->MPIInfo->msg_tag_counter,
                                     in->MPIInfo->comm, &status);
                #endif
                in->MPIInfo->msg_tag_counter++;
                current_rank = Paso_MPIInfo_mod(mpiSize, current_rank-1);
            }
        }
    }
    TMPMEMFREE(index_list);
    TMPMEMFREE(newGlobalDOFID);
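
    /* Debugging aid: enable the block below to print the new global DOF
     * ids of all nodes held on this rank. */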
    #if 0
    for (i=0; i<in->Nodes->numNodes; ++i) printf("%d ", in->Nodes->globalDegreesOfFreedom[i]);
    printf("\n");
    #endif
    return;
}
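
/*
 * Usage sketch (illustrative only; the variables shown here are
 * hypothetical, following the conventions visible in this file):
 *
 *   dim_t* distribution = TMPMEMALLOC(mpiSize+1, dim_t);
 *   ... fill distribution so that rank p owns global DOFs
 *       [distribution[p], distribution[p+1]) ...
 *   Finley_Mesh_optimizeDOFLabeling(mesh, distribution);
 *   if (!Finley_noError()) { ... handle the error ... }
 *   TMPMEMFREE(distribution);
 */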
