/[escript]/trunk/finley/src/NodeFile_gather.c
ViewVC logotype

Contents of /trunk/finley/src/NodeFile_gather.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2548 - (show annotations)
Mon Jul 20 06:20:06 2009 UTC (10 years, 3 months ago) by jfenwick
File MIME type: text/plain
File size: 9086 byte(s)
Updating copyright notices
1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2009 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14
15 /**************************************************************
16 *
17 * Finley: Mesh: NodeFile
* gathers the NodeFile "out" from the NodeFile "in": for each i in
* 0:out->numNodes-1, entry i of "out" receives the node of "in" whose id
* equals index[i]; only ids in the range min_index to max_index (exclusive)
* are considered
21 *
22 **************************************************************/
23
24 #include "NodeFile.h"
25
26 /**************************************************************/
27
28 void Finley_NodeFile_gatherEntries(dim_t n, index_t* index, index_t min_index, index_t max_index,
29 index_t* Id_out, index_t* Id_in,
30 index_t* Tag_out, index_t* Tag_in,
31 index_t* globalDegreesOfFreedom_out, index_t* globalDegreesOfFreedom_in,
32 dim_t numDim, double* Coordinates_out, double* Coordinates_in)
33 {
34 dim_t i;
35 register index_t k;
36 const index_t range=max_index-min_index;
37 const size_t numDim_size=(size_t)numDim*sizeof(double);
38 #pragma omp parallel for private(i,k) schedule(static)
39 for (i=0;i<n;i++) {
40 k=index[i]-min_index;
41 if ((k>=0) && (k <range)) {
42 Id_out[i]=Id_in[k];
43 Tag_out[i]=Tag_in[k];
44 globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
45 memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]), &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
46 }
47 }
48 }
49
50 void Finley_NodeFile_gather(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
51 {
52 index_t min_id, max_id;
53 Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
54 Finley_NodeFile_gatherEntries(out->numNodes, index, min_id, max_id,
55 out->Id, in->Id,
56 out->Tag, in->Tag,
57 out->globalDegreesOfFreedom, in->globalDegreesOfFreedom,
58 out->numDim, out->Coordinates, in->Coordinates);
59 }
60
61 void Finley_NodeFile_gather_global(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
62 {
63 index_t min_id, max_id, undefined_node;
64 Paso_MPI_rank buffer_rank, dest, source, *distribution=NULL;
65 index_t *Id_buffer=NULL, *Tag_buffer=NULL, *globalDegreesOfFreedom_buffer=NULL;
66 double* Coordinates_buffer=NULL;
67 dim_t p, buffer_len,n;
68 char error_msg[100];
69 #ifdef PASO_MPI
70 MPI_Status status;
71 #endif
72
73 /* get the global range of node ids */
74 Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
75 undefined_node=min_id-1;
76
77 distribution=TMPMEMALLOC(in->MPIInfo->size+1, index_t);
78
79 if ( !Finley_checkPtr(distribution) ) {
80 /* distribute the range of node ids */
81 buffer_len=Paso_MPIInfo_setDistribution(in->MPIInfo,min_id,max_id,distribution);
82 /* allocate buffers */
83 Id_buffer=TMPMEMALLOC(buffer_len,index_t);
84 Tag_buffer=TMPMEMALLOC(buffer_len,index_t);
85 globalDegreesOfFreedom_buffer=TMPMEMALLOC(buffer_len,index_t);
86 Coordinates_buffer=TMPMEMALLOC(buffer_len*out->numDim,double);
87 if (! (Finley_checkPtr(Id_buffer) || Finley_checkPtr(Tag_buffer) ||
88 Finley_checkPtr(globalDegreesOfFreedom_buffer) || Finley_checkPtr(Coordinates_buffer) ) ) {
89 /* fill Id_buffer by the undefined_node marker to check if nodes are defined */
90 #pragma omp parallel for private(n) schedule(static)
91 for (n=0;n<buffer_len;n++) Id_buffer[n]=undefined_node;
92
93 /* fill the buffer by sending portions around in a circle */
94 dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
95 source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
96 buffer_rank=in->MPIInfo->rank;
97 for (p=0; p< in->MPIInfo->size; ++p) {
98 if (p>0) { /* the initial send can be skipped */
99 #ifdef PASO_MPI
100 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
101 dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
102 in->MPIInfo->comm,&status);
103 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
104 dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
105 in->MPIInfo->comm,&status);
106 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
107 dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
108 in->MPIInfo->comm,&status);
109 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
110 dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
111 in->MPIInfo->comm,&status);
112 #endif
113 in->MPIInfo->msg_tag_counter+=4;
114 }
115 buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
116 Finley_NodeFile_scatterEntries(in->numNodes, in->Id,
117 distribution[buffer_rank], distribution[buffer_rank+1],
118 Id_buffer, in->Id,
119 Tag_buffer, in->Tag,
120 globalDegreesOfFreedom_buffer, in->globalDegreesOfFreedom,
121 out->numDim, Coordinates_buffer, in->Coordinates);
122 }
123 /* now entries are collected from the buffer again by sending the entries around in a circle */
124 dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
125 source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
126 buffer_rank=in->MPIInfo->rank;
127 for (p=0; p< in->MPIInfo->size; ++p) {
128 Finley_NodeFile_gatherEntries(out->numNodes, index,
129 distribution[buffer_rank], distribution[buffer_rank+1],
130 out->Id, Id_buffer,
131 out->Tag, Tag_buffer,
132 out->globalDegreesOfFreedom, globalDegreesOfFreedom_buffer,
133 out->numDim, out->Coordinates, Coordinates_buffer);
134 if (p<in->MPIInfo->size-1) { /* the last send can be skipped */
135 #ifdef PASO_MPI
136 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
137 dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
138 in->MPIInfo->comm,&status);
139 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
140 dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
141 in->MPIInfo->comm,&status);
142 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
143 dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
144 in->MPIInfo->comm,&status);
145 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
146 dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
147 in->MPIInfo->comm,&status);
148 #endif
149 in->MPIInfo->msg_tag_counter+=4;
150 }
151 buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
152 }
153 /* check if all nodes are set: */
154 #pragma omp parallel for private(n) schedule(static)
155 for (n=0; n< out->numNodes; ++n) {
156 if (out->Id[n] == undefined_node ) {
157 sprintf(error_msg,"Finley_NodeFile_gather_global: Node id %d at position %d is referenced but is not defined.",out->Id[n],n);
158 Finley_setError(VALUE_ERROR,error_msg);
159 }
160 }
161
162 }
163 TMPMEMFREE(Id_buffer);
164 TMPMEMFREE(Tag_buffer);
165 TMPMEMFREE(globalDegreesOfFreedom_buffer);
166 TMPMEMFREE(Coordinates_buffer);
167 }
168 TMPMEMFREE(distribution);
169 /* make sure that the error is global */
170 Paso_MPIInfo_noError(in->MPIInfo);
171 }

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26