/[escript]/trunk-mpi-branch/finley/src/NodeFile_gather.c
ViewVC logotype

Contents of /trunk-mpi-branch/finley/src/NodeFile_gather.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1249 - (show annotations)
Thu Aug 16 00:47:44 2007 UTC (11 years, 8 months ago) by ksteube
File MIME type: text/plain
File size: 9404 byte(s)
Fixed Paso_MPIInfo_mod for n=1 (single CPU).
Removed four unit tests relating to Interval() object.
Removed unit test file escript/test/DataProfTestCase.cpp.

1 /*
2 ************************************************************
3 * Copyright 2006 by ACcESS MNRF *
4 * *
5 * http://www.access.edu.au *
6 * Primary Business: Queensland, Australia *
7 * Licensed under the Open Software License version 3.0 *
8 * http://www.opensource.org/licenses/osl-3.0.php *
9 * *
10 ************************************************************
11 */
12
13 /**************************************************************/
14
15 /* Finley: Mesh: NodeFile */
16
/* gathers the NodeFile out from the NodeFile in using the entries       */
/* in index[0:out->numNodes-1] which are between min_index and max_index */
/* (exclusive): out entry i is taken from in entry index[i]-min_index    */
21 /**************************************************************/
22
23 /* Author: gross@access.edu.au */
24 /* Version: $Id$ */
25
26 /**************************************************************/
27
28 #include "NodeFile.h"
29
30 /**************************************************************/
31
32 void Finley_NodeFile_gatherEntries(dim_t n, index_t* index, index_t min_index, index_t max_index,
33 index_t* Id_out, index_t* Id_in,
34 index_t* Tag_out, index_t* Tag_in,
35 index_t* globalDegreesOfFreedom_out, index_t* globalDegreesOfFreedom_in,
36 dim_t numDim, double* Coordinates_out, double* Coordinates_in)
37 {
38 dim_t i,j;
39 register index_t k;
40 register const index_t range=max_index-min_index;
41 const size_t numDim_size=(size_t)numDim*sizeof(double);
42
43 #pragma omp parallel for private(i,j,k) schedule(static)
44 for (i=0;i<n;i++) {
45 k=index[i]-min_index;
46 if ((k>=0) && (k <range)) {
47 Id_out[i]=Id_in[k];
48 Tag_out[i]=Tag_in[k];
49 globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
50 memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]), &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
51 }
52 }
53 }
54
55 void Finley_NodeFile_gather(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
56 {
57 Finley_NodeFile_gatherEntries(out->numNodes, index, 0, in->numNodes,
58 out->Id, in->Id,
59 out->Tag, in->Tag,
60 out->globalDegreesOfFreedom, in->globalDegreesOfFreedom,
61 out->numDim, out->Coordinates, in->Coordinates);
62 out->isPrepared=FINLEY_UNPREPARED;
63 }
64
/* Gathers 'out' from 'in' where index[] holds GLOBAL node ids that may live
   on any MPI rank. The global id range is partitioned across ranks
   ('distribution'); each rank's slice of the node data is circulated around
   a ring of processes so every rank can (1) deposit its local nodes into the
   slice it currently holds and (2) pick out the entries its index[] asks for.
   Nodes never defined anywhere are detected afterwards via an
   'undefined_node' marker pre-filled into the Id buffer. */
void Finley_NodeFile_gather_global(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
{
   index_t min_id, max_id, undefined_node;
   Paso_MPI_rank buffer_rank, dest, source, *distribution=NULL;
   index_t *Id_buffer=NULL, *Tag_buffer=NULL, *globalDegreesOfFreedom_buffer=NULL;
   double* Coordinates_buffer=NULL;
   dim_t p, buffer_len,n;
   char error_msg[100];
   #ifdef PASO_MPI
   MPI_Status status;
   #endif

   /* get the global range of node ids */
   Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
   /* marker value guaranteed to be outside the valid id range [min_id,max_id] */
   undefined_node=min_id-1;

   distribution=TMPMEMALLOC(in->MPIInfo->size+1, index_t);

   if ( !Finley_checkPtr(distribution) ) {
      /* distribute the range of node ids; buffer_len is the largest slice size */
      buffer_len=Paso_MPIInfo_setDistribution(in->MPIInfo,min_id,max_id,distribution);
      /* allocate buffers */
      Id_buffer=TMPMEMALLOC(buffer_len,index_t);
      Tag_buffer=TMPMEMALLOC(buffer_len,index_t);
      globalDegreesOfFreedom_buffer=TMPMEMALLOC(buffer_len,index_t);
      /* NOTE(review): sized with out->numDim but filled from in->Coordinates;
         assumes in->numDim == out->numDim — TODO confirm at call sites */
      Coordinates_buffer=TMPMEMALLOC(buffer_len*out->numDim,double);
      if (! (Finley_checkPtr(Id_buffer) || Finley_checkPtr(Tag_buffer) ||
             Finley_checkPtr(globalDegreesOfFreedom_buffer) || Finley_checkPtr(Coordinates_buffer) ) ) {
         /* fill Id_buffer by the undefined_node marker to check if nodes are defined */
         #pragma omp parallel for private(n) schedule(static)
         for (n=0;n<buffer_len;n++) Id_buffer[n]=undefined_node;

         /* Phase 1: fill the buffer by sending portions around in a circle.
            In step p the buffer holds the slice owned by 'buffer_rank';
            each rank scatters its local nodes into whatever slice it holds. */
         dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
         source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
         buffer_rank=in->MPIInfo->rank;
         for (p=0; p< in->MPIInfo->size; ++p) {
             if (p>0) { /* the initial send can be skipped */
                 #ifdef PASO_MPI
                 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
                                      dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
                                      in->MPIInfo->comm,&status);
                 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
                                      dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
                                      in->MPIInfo->comm,&status);
                 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
                                      dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
                                      in->MPIInfo->comm,&status);
                 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
                                      dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
                                      in->MPIInfo->comm,&status);
                 #endif
                 /* four messages per step: keep the tag counter in lockstep on all ranks */
                 in->MPIInfo->msg_tag_counter+=4;
             }
             /* the buffer we now hold belongs to the previous rank in the ring */
             buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
             Finley_NodeFile_scatterEntries(in->numNodes, in->Id,
                                            distribution[buffer_rank], distribution[buffer_rank+1],
                                            Id_buffer, in->Id,
                                            Tag_buffer, in->Tag,
                                            globalDegreesOfFreedom_buffer, in->globalDegreesOfFreedom,
                                            out->numDim, Coordinates_buffer, in->Coordinates);
         }
         /* Phase 2: now entries are collected from the buffer again by sending
            the entries around in a circle; each rank gathers the entries that
            its index[] requests from whatever slice it currently holds. */
         dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
         source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
         buffer_rank=in->MPIInfo->rank;
         for (p=0; p< in->MPIInfo->size; ++p) {
             Finley_NodeFile_gatherEntries(out->numNodes, index,
                                           distribution[buffer_rank], distribution[buffer_rank+1],
                                           out->Id, Id_buffer,
                                           out->Tag, Tag_buffer,
                                           out->globalDegreesOfFreedom, globalDegreesOfFreedom_buffer,
                                           out->numDim, out->Coordinates, Coordinates_buffer);
             if (p<in->MPIInfo->size-1) { /* the last send can be skipped */
                 #ifdef PASO_MPI
                 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
                                      dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
                                      in->MPIInfo->comm,&status);
                 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
                                      dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
                                      in->MPIInfo->comm,&status);
                 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
                                      dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
                                      in->MPIInfo->comm,&status);
                 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
                                      dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
                                      in->MPIInfo->comm,&status);
                 #endif
                 in->MPIInfo->msg_tag_counter+=4;
             }
             buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
         }
         /* check if all nodes are set: any out->Id still equal to the marker
            was never defined on any rank.
            NOTE(review): concurrent iterations sprintf into the single shared
            error_msg[] and call Finley_setError inside this omp parallel for —
            looks like a data race when several nodes are undefined; also the
            message prints out->Id[n], which here equals the undefined_node
            marker rather than the requested global id — TODO confirm intent. */
         #pragma omp parallel for private(n) schedule(static)
         for (n=0; n< out->numNodes; ++n) {
             if (out->Id[n] == undefined_node ) {
                sprintf(error_msg,"Finley_NodeFile_gather_global: Node id %d is referenced by an element (n=%d) but is not defined.",out->Id[n], n);
                Finley_setError(VALUE_ERROR,error_msg);
             }
         }

      }
      TMPMEMFREE(Id_buffer);
      TMPMEMFREE(Tag_buffer);
      TMPMEMFREE(globalDegreesOfFreedom_buffer);
      TMPMEMFREE(Coordinates_buffer);
   }
   TMPMEMFREE(distribution);
   /* the gathered node file must be re-prepared before use */
   out->isPrepared=FINLEY_UNPREPARED;
   /* make sure that the error is global */
   Paso_MPIInfo_noError(in->MPIInfo);
}

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26