/[escript]/branches/arrayview_from_1695_trunk/finley/src/NodeFile_gather.c
ViewVC logotype

Contents of /branches/arrayview_from_1695_trunk/finley/src/NodeFile_gather.c

Parent Directory | Revision Log


Revision 1781 - (show annotations)
Thu Sep 11 05:03:14 2008 UTC (10 years, 11 months ago) by jfenwick
File MIME type: text/plain
File size: 9121 byte(s)
Branch commit

Merged changes from trunk version 1695 up to and including version 1779.


1
2 /* $Id$ */
3
4 /*******************************************************
5 *
6 * Copyright 2003-2007 by ACceSS MNRF
7 * Copyright 2007 by University of Queensland
8 *
9 * http://esscc.uq.edu.au
10 * Primary Business: Queensland, Australia
11 * Licensed under the Open Software License version 3.0
12 * http://www.opensource.org/licenses/osl-3.0.php
13 *
14 *******************************************************/
15
16 /**************************************************************
17 *
18 * Finley: Mesh: NodeFile
19 * gathers the NodeFile out from the NodeFile in using the entries
20 * in index[0:out->numNodes-1] which are between min_index and max_index (exclusive)
21 * the node index[i]
22 *
23 **************************************************************/
24
25 #include "NodeFile.h"
26
27 /**************************************************************/
28
29 void Finley_NodeFile_gatherEntries(dim_t n, index_t* index, index_t min_index, index_t max_index,
30 index_t* Id_out, index_t* Id_in,
31 index_t* Tag_out, index_t* Tag_in,
32 index_t* globalDegreesOfFreedom_out, index_t* globalDegreesOfFreedom_in,
33 dim_t numDim, double* Coordinates_out, double* Coordinates_in)
34 {
35 dim_t i;
36 register index_t k;
37 const index_t range=max_index-min_index;
38 const size_t numDim_size=(size_t)numDim*sizeof(double);
39 #pragma omp parallel for private(i,k) schedule(static)
40 for (i=0;i<n;i++) {
41 k=index[i]-min_index;
42 if ((k>=0) && (k <range)) {
43 Id_out[i]=Id_in[k];
44 Tag_out[i]=Tag_in[k];
45 globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
46 memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]), &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
47 }
48 }
49 }
50
51 void Finley_NodeFile_gather(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
52 {
53 index_t min_id, max_id;
54 Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
55 Finley_NodeFile_gatherEntries(out->numNodes, index, min_id, max_id,
56 out->Id, in->Id,
57 out->Tag, in->Tag,
58 out->globalDegreesOfFreedom, in->globalDegreesOfFreedom,
59 out->numDim, out->Coordinates, in->Coordinates);
60 }
61
62 void Finley_NodeFile_gather_global(index_t* index, Finley_NodeFile* in, Finley_NodeFile* out)
63 {
64 index_t min_id, max_id, undefined_node;
65 Paso_MPI_rank buffer_rank, dest, source, *distribution=NULL;
66 index_t *Id_buffer=NULL, *Tag_buffer=NULL, *globalDegreesOfFreedom_buffer=NULL;
67 double* Coordinates_buffer=NULL;
68 dim_t p, buffer_len,n;
69 char error_msg[100];
70 #ifdef PASO_MPI
71 MPI_Status status;
72 #endif
73
74 /* get the global range of node ids */
75 Finley_NodeFile_setGlobalIdRange(&min_id,&max_id,in);
76 undefined_node=min_id-1;
77
78 distribution=TMPMEMALLOC(in->MPIInfo->size+1, index_t);
79
80 if ( !Finley_checkPtr(distribution) ) {
81 /* distribute the range of node ids */
82 buffer_len=Paso_MPIInfo_setDistribution(in->MPIInfo,min_id,max_id,distribution);
83 /* allocate buffers */
84 Id_buffer=TMPMEMALLOC(buffer_len,index_t);
85 Tag_buffer=TMPMEMALLOC(buffer_len,index_t);
86 globalDegreesOfFreedom_buffer=TMPMEMALLOC(buffer_len,index_t);
87 Coordinates_buffer=TMPMEMALLOC(buffer_len*out->numDim,double);
88 if (! (Finley_checkPtr(Id_buffer) || Finley_checkPtr(Tag_buffer) ||
89 Finley_checkPtr(globalDegreesOfFreedom_buffer) || Finley_checkPtr(Coordinates_buffer) ) ) {
90 /* fill Id_buffer by the undefined_node marker to check if nodes are defined */
91 #pragma omp parallel for private(n) schedule(static)
92 for (n=0;n<buffer_len;n++) Id_buffer[n]=undefined_node;
93
94 /* fill the buffer by sending portions around in a circle */
95 dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
96 source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
97 buffer_rank=in->MPIInfo->rank;
98 for (p=0; p< in->MPIInfo->size; ++p) {
99 if (p>0) { /* the initial send can be skipped */
100 #ifdef PASO_MPI
101 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
102 dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
103 in->MPIInfo->comm,&status);
104 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
105 dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
106 in->MPIInfo->comm,&status);
107 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
108 dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
109 in->MPIInfo->comm,&status);
110 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
111 dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
112 in->MPIInfo->comm,&status);
113 #endif
114 in->MPIInfo->msg_tag_counter+=4;
115 }
116 buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
117 Finley_NodeFile_scatterEntries(in->numNodes, in->Id,
118 distribution[buffer_rank], distribution[buffer_rank+1],
119 Id_buffer, in->Id,
120 Tag_buffer, in->Tag,
121 globalDegreesOfFreedom_buffer, in->globalDegreesOfFreedom,
122 out->numDim, Coordinates_buffer, in->Coordinates);
123 }
124 /* now entries are collected from the buffer again by sending the entries around in a circle */
125 dest=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank + 1);
126 source=Paso_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank - 1);
127 buffer_rank=in->MPIInfo->rank;
128 for (p=0; p< in->MPIInfo->size; ++p) {
129 Finley_NodeFile_gatherEntries(out->numNodes, index,
130 distribution[buffer_rank], distribution[buffer_rank+1],
131 out->Id, Id_buffer,
132 out->Tag, Tag_buffer,
133 out->globalDegreesOfFreedom, globalDegreesOfFreedom_buffer,
134 out->numDim, out->Coordinates, Coordinates_buffer);
135 if (p<in->MPIInfo->size-1) { /* the last send can be skipped */
136 #ifdef PASO_MPI
137 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT,
138 dest, in->MPIInfo->msg_tag_counter, source, in->MPIInfo->msg_tag_counter,
139 in->MPIInfo->comm,&status);
140 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT,
141 dest, in->MPIInfo->msg_tag_counter+1, source, in->MPIInfo->msg_tag_counter+1,
142 in->MPIInfo->comm,&status);
143 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len, MPI_INT,
144 dest, in->MPIInfo->msg_tag_counter+2, source, in->MPIInfo->msg_tag_counter+2,
145 in->MPIInfo->comm,&status);
146 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*out->numDim, MPI_DOUBLE,
147 dest, in->MPIInfo->msg_tag_counter+3, source, in->MPIInfo->msg_tag_counter+3,
148 in->MPIInfo->comm,&status);
149 #endif
150 in->MPIInfo->msg_tag_counter+=4;
151 }
152 buffer_rank=Paso_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
153 }
154 /* check if all nodes are set: */
155 #pragma omp parallel for private(n) schedule(static)
156 for (n=0; n< out->numNodes; ++n) {
157 if (out->Id[n] == undefined_node ) {
158 sprintf(error_msg,"Finley_NodeFile_gather_global: Node id %d at position %d is referenced but is not defined.",out->Id[n],n);
159 Finley_setError(VALUE_ERROR,error_msg);
160 }
161 }
162
163 }
164 TMPMEMFREE(Id_buffer);
165 TMPMEMFREE(Tag_buffer);
166 TMPMEMFREE(globalDegreesOfFreedom_buffer);
167 TMPMEMFREE(Coordinates_buffer);
168 }
169 TMPMEMFREE(distribution);
170 /* make sure that the error is global */
171 Paso_MPIInfo_noError(in->MPIInfo);
172 }

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26