/branches/trilinos_from_5897/dudley/src/NodeFile_createDenseLabelings.cpp
Revision 6009 by caltinay, Wed Mar 2 04:13:26 2016 UTC: Much needed sync with trunk...

/*****************************************************************************
*
* Copyright (c) 2003-2016 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

/****************************************************************************/

/* Dudley: Mesh: NodeFile */

/* creates a dense labeling of the global degrees of freedom */
/* and returns the new number of global degrees of freedom */

/****************************************************************************/

#include "NodeFile.h"

namespace dudley {

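/* Worked illustration of what "dense labeling" means (the values are
 * hypothetical, not taken from any particular mesh): if the global DOF ids
 * attached to the local nodes are {2, 7, 7, 11, 2}, the labeling assigns
 * consecutive numbers to the ids that are actually in use, so the nodes end
 * up carrying {0, 1, 1, 2, 0} and the routine returns 3 as the new number of
 * global degrees of freedom.
 */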
dim_t Dudley_NodeFile_createDenseDOFLabeling(Dudley_NodeFile* in)
{
    index_t min_dof, max_dof, unset_dof = -1, set_dof = 1, dof_0, dof_1, *DOF_buffer = NULL, k;
    int buffer_rank, *distribution = NULL;
    dim_t p, buffer_len, n, myDOFs, *offsets = NULL, *loc_offsets = NULL, new_numGlobalDOFs = 0, myNewDOFs;
    bool *set_new_DOF = NULL;
#ifdef ESYS_MPI
    int dest, source;
    MPI_Status status;
#endif

    /* get the global range of node ids */
    Dudley_NodeFile_setGlobalDOFRange(&min_dof, &max_dof, in);

    distribution = new index_t[in->MPIInfo->size + 1];
    offsets = new dim_t[in->MPIInfo->size];
    loc_offsets = new dim_t[in->MPIInfo->size];
    set_new_DOF = new bool[in->numNodes];

    /* distribute the range of node ids */
    buffer_len = in->MPIInfo->setDistribution(min_dof, max_dof, distribution);
    myDOFs = distribution[in->MPIInfo->rank + 1] - distribution[in->MPIInfo->rank];
    /* allocate buffers */
    DOF_buffer = new index_t[buffer_len];
    /* fill DOF_buffer with the unset_dof marker to check if nodes are defined */
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < buffer_len; n++)
        DOF_buffer[n] = unset_dof;

    /* fill the buffer by sending portions around in a circle */
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        if (p > 0) { /* the initial send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(DOF_buffer, buffer_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
        dof_0 = distribution[buffer_rank];
        dof_1 = distribution[buffer_rank + 1];
#pragma omp parallel for private(n,k) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            k = in->globalDegreesOfFreedom[n];
            if ((dof_0 <= k) && (k < dof_1)) {
                DOF_buffer[k - dof_0] = set_dof;
            }
        }
    }
    /* count the entries in the DOF_buffer */
    /* TODO: OMP parallel */
    myNewDOFs = 0;
    for (n = 0; n < myDOFs; ++n) {
        if (DOF_buffer[n] == set_dof) {
            DOF_buffer[n] = myNewDOFs;
            myNewDOFs++;
        }
    }
    memset(loc_offsets, 0, in->MPIInfo->size * sizeof(dim_t));
    loc_offsets[in->MPIInfo->rank] = myNewDOFs;
#ifdef ESYS_MPI
    MPI_Allreduce(loc_offsets, offsets, in->MPIInfo->size, MPI_INT, MPI_SUM, in->MPIInfo->comm);
    new_numGlobalDOFs = 0;
    for (n = 0; n < in->MPIInfo->size; ++n) {
        loc_offsets[n] = new_numGlobalDOFs;
        new_numGlobalDOFs += offsets[n];
    }
#else
    new_numGlobalDOFs = loc_offsets[0];
    loc_offsets[0] = 0;
#endif
#pragma omp parallel
    {
#pragma omp for private(n) schedule(static)
        for (n = 0; n < myDOFs; ++n)
            DOF_buffer[n] += loc_offsets[in->MPIInfo->rank];
        /* now entries are collected from the buffer again by sending the entries around in a circle */
#pragma omp for private(n) schedule(static)
        for (n = 0; n < in->numNodes; ++n)
            set_new_DOF[n] = true;
    }
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        dof_0 = distribution[buffer_rank];
        dof_1 = distribution[buffer_rank + 1];
#pragma omp parallel for private(n,k) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            k = in->globalDegreesOfFreedom[n];
            if (set_new_DOF[n] && (dof_0 <= k) && (k < dof_1)) {
                in->globalDegreesOfFreedom[n] = DOF_buffer[k - dof_0];
                set_new_DOF[n] = false;
            }
        }
        if (p < in->MPIInfo->size - 1) { /* the last send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(DOF_buffer, buffer_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
    }
    delete[] DOF_buffer;
    delete[] distribution;
    delete[] loc_offsets;
    delete[] offsets;
    delete[] set_new_DOF;
    return new_numGlobalDOFs;
}
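
/* The loops above use a ring exchange: every rank forwards its buffer to
 * rank+1 and receives from rank-1, so after p exchanges it holds the buffer
 * that started on rank (rank - p + size) % size and can mark or read the DOFs
 * falling into the corresponding slice of the distribution.  A minimal,
 * self-contained sketch of the same pattern (illustration only, not part of
 * dudley; compile with an MPI C++ compiler and run with several ranks):
 *
 *   #include <mpi.h>
 *   #include <cstdio>
 *
 *   int main(int argc, char** argv)
 *   {
 *       MPI_Init(&argc, &argv);
 *       int rank, size;
 *       MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *       MPI_Comm_size(MPI_COMM_WORLD, &size);
 *       const int dest = (rank + 1) % size;          // forward neighbour
 *       const int source = (rank - 1 + size) % size; // backward neighbour
 *       int buffer = rank;              // each rank starts with its own data
 *       for (int p = 0; p < size; ++p) {
 *           std::printf("rank %d, step %d: holding data of rank %d\n",
 *                       rank, p, buffer);
 *           MPI_Status status;
 *           MPI_Sendrecv_replace(&buffer, 1, MPI_INT, dest, 0, source, 0,
 *                                MPI_COMM_WORLD, &status);
 *       }
 *       MPI_Finalize();
 *       return 0;
 *   }
 */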

void Dudley_NodeFile_assignMPIRankToDOFs(Dudley_NodeFile* in, int* mpiRankOfDOF, index_t* distribution)
{
    index_t min_DOF, max_DOF, k;
    dim_t n;
    int p, p_min = in->MPIInfo->size, p_max = -1;
    /* first we calculate the min and max dof on this processor to reduce costs for searching */
    Dudley_NodeFile_setDOFRange(&min_DOF, &max_DOF, in);

    for (p = 0; p < in->MPIInfo->size; ++p) {
        if (distribution[p] <= min_DOF)
            p_min = p;
        if (distribution[p] <= max_DOF)
            p_max = p;
    }
#pragma omp parallel for private(n,k,p) schedule(static)
    for (n = 0; n < in->numNodes; ++n) {
        k = in->globalDegreesOfFreedom[n];
        for (p = p_min; p <= p_max; ++p) {
            if (k < distribution[p + 1]) {
                mpiRankOfDOF[n] = p;
                break;
            }
        }
    }
}
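
/* Worked illustration (hypothetical distribution, not from any particular
 * run): with distribution = {0, 4, 8, 12} on 3 ranks, a node whose global DOF
 * is 6 satisfies distribution[1] <= 6 < distribution[2], so mpiRankOfDOF for
 * that node is set to 1; the p_min/p_max pre-scan only narrows the search to
 * the ranks whose slices can overlap the local DOF range.
 */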

dim_t Dudley_NodeFile_createDenseReducedDOFLabeling(Dudley_NodeFile* in, index_t* reducedNodeMask)
{
    index_t min_dof, max_dof, unset_dof = -1, set_dof = 1, dof_0, dof_1, *DOF_buffer = NULL, k;
    int buffer_rank, *distribution = NULL;
    dim_t p, buffer_len, n, myDOFs, *offsets = NULL, *loc_offsets = NULL, globalNumReducedDOFs = 0, myNewDOFs;
#ifdef ESYS_MPI
    int dest, source;
    MPI_Status status;
#endif

    /* get the global range of node ids */
    Dudley_NodeFile_setGlobalDOFRange(&min_dof, &max_dof, in);

    distribution = new index_t[in->MPIInfo->size + 1];
    offsets = new dim_t[in->MPIInfo->size];
    loc_offsets = new dim_t[in->MPIInfo->size];

    /* distribute the range of node ids */
    buffer_len = in->MPIInfo->setDistribution(min_dof, max_dof, distribution);
    myDOFs = distribution[in->MPIInfo->rank + 1] - distribution[in->MPIInfo->rank];
    /* allocate buffers */
    DOF_buffer = new index_t[buffer_len];

    /* fill DOF_buffer with the unset_dof marker to check if nodes are defined */
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < buffer_len; n++)
        DOF_buffer[n] = unset_dof;

    /* fill the buffer by sending portions around in a circle */
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        if (p > 0) { /* the initial send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(DOF_buffer, buffer_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
        dof_0 = distribution[buffer_rank];
        dof_1 = distribution[buffer_rank + 1];
#pragma omp parallel for private(n,k) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            if (reducedNodeMask[n] > -1) {
                k = in->globalDegreesOfFreedom[n];
                if ((dof_0 <= k) && (k < dof_1)) {
                    DOF_buffer[k - dof_0] = set_dof;
                }
            }
        }
    }
    /* count the entries in the DOF_buffer */
    /* TODO: OMP parallel */
    myNewDOFs = 0;
    for (n = 0; n < myDOFs; ++n) {
        if (DOF_buffer[n] == set_dof) {
            DOF_buffer[n] = myNewDOFs;
            myNewDOFs++;
        }
    }
    memset(loc_offsets, 0, in->MPIInfo->size * sizeof(dim_t));
    loc_offsets[in->MPIInfo->rank] = myNewDOFs;
#ifdef ESYS_MPI
    MPI_Allreduce(loc_offsets, offsets, in->MPIInfo->size, MPI_INT, MPI_SUM, in->MPIInfo->comm);
    globalNumReducedDOFs = 0;
    for (n = 0; n < in->MPIInfo->size; ++n) {
        loc_offsets[n] = globalNumReducedDOFs;
        globalNumReducedDOFs += offsets[n];
    }
#else
    globalNumReducedDOFs = loc_offsets[0];
    loc_offsets[0] = 0;
#endif
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < myDOFs; ++n)
        DOF_buffer[n] += loc_offsets[in->MPIInfo->rank];
    /* now entries are collected from the buffer again by sending the entries around in a circle */
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < in->numNodes; ++n)
        in->globalReducedDOFIndex[n] = loc_offsets[0] - 1;
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        dof_0 = distribution[buffer_rank];
        dof_1 = distribution[buffer_rank + 1];
#pragma omp parallel for private(n,k) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            if (reducedNodeMask[n] > -1) {
                k = in->globalDegreesOfFreedom[n];
                if ((dof_0 <= k) && (k < dof_1))
                    in->globalReducedDOFIndex[n] = DOF_buffer[k - dof_0];
            }
        }
        if (p < in->MPIInfo->size - 1) { /* the last send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(DOF_buffer, buffer_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
    }
    delete[] DOF_buffer;
    delete[] distribution;
    delete[] loc_offsets;
    delete[] offsets;
    return globalNumReducedDOFs;
}
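
/* Note on the reduced variant above: only nodes with reducedNodeMask[n] > -1
 * take part in the labeling; all other entries of globalReducedDOFIndex keep
 * the default loc_offsets[0] - 1, which is -1 after the offset pass, marking
 * them as outside the reduced set.  For example (hypothetical mask), with
 * reducedNodeMask = {0, -1, 1} only nodes 0 and 2 receive reduced DOF numbers.
 */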

dim_t Dudley_NodeFile_createDenseNodeLabeling(Dudley_NodeFile* in, index_t* node_distribution,
                                              const index_t* dof_distribution)
{
    index_t myFirstDOF, myLastDOF, max_id, min_id, loc_max_id, loc_min_id, dof, id, itmp, nodeID_0, nodeID_1,
        dof_0, dof_1, *Node_buffer = NULL;
    dim_t n, my_buffer_len, buffer_len, globalNumNodes = 0, myNewNumNodes;
    int p, buffer_rank;
    const index_t unset_nodeID = -1, set_nodeID = 1;
    const dim_t header_len = 2;
#ifdef ESYS_MPI
    int dest, source;
    MPI_Status status;
#endif
    int myRank = in->MPIInfo->rank;

    /* find the range of node ids controlled by me */

    myFirstDOF = dof_distribution[myRank];
    myLastDOF = dof_distribution[myRank + 1];
    max_id = -escript::DataTypes::index_t_max();
    min_id = escript::DataTypes::index_t_max();
#pragma omp parallel private(loc_max_id,loc_min_id)
    {
        loc_max_id = max_id;
        loc_min_id = min_id;
#pragma omp for private(n,dof,id) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            dof = in->globalDegreesOfFreedom[n];
            id = in->Id[n];
            if ((myFirstDOF <= dof) && (dof < myLastDOF)) {
                loc_max_id = std::max(loc_max_id, id);
                loc_min_id = std::min(loc_min_id, id);
            }
        }
#pragma omp critical
        {
            max_id = std::max(loc_max_id, max_id);
            min_id = std::min(loc_min_id, min_id);
        }
    }
    /* allocate a buffer */
    my_buffer_len = max_id >= min_id ? max_id - min_id + 1 : 0;

#ifdef ESYS_MPI
    MPI_Allreduce(&my_buffer_len, &buffer_len, 1, MPI_INT, MPI_MAX, in->MPIInfo->comm);
#else
    buffer_len = my_buffer_len;
#endif

    Node_buffer = new index_t[buffer_len + header_len];
    /* mark and count the nodes in use */
#pragma omp parallel
    {
#pragma omp for private(n) schedule(static)
        for (n = 0; n < buffer_len + header_len; n++)
            Node_buffer[n] = unset_nodeID;
#pragma omp for private(n) schedule(static)
        for (n = 0; n < in->numNodes; n++)
            in->globalNodesIndex[n] = -1;
#pragma omp for private(n,dof,id) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            dof = in->globalDegreesOfFreedom[n];
            id = in->Id[n];
            if ((myFirstDOF <= dof) && (dof < myLastDOF))
                Node_buffer[id - min_id + header_len] = set_nodeID;
        }
    }
    myNewNumNodes = 0;
    for (n = 0; n < my_buffer_len; n++) {
        if (Node_buffer[header_len + n] == set_nodeID) {
            Node_buffer[header_len + n] = myNewNumNodes;
            myNewNumNodes++;
        }
    }
    /* make the local number of nodes globally available */
#ifdef ESYS_MPI
    MPI_Allgather(&myNewNumNodes, 1, MPI_INT, node_distribution, 1, MPI_INT, in->MPIInfo->comm);
#else
    node_distribution[0] = myNewNumNodes;
#endif

    globalNumNodes = 0;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        itmp = node_distribution[p];
        node_distribution[p] = globalNumNodes;
        globalNumNodes += itmp;
    }
    node_distribution[in->MPIInfo->size] = globalNumNodes;

    /* offset Node_buffer */
    itmp = node_distribution[in->MPIInfo->rank];
#pragma omp for private(n) schedule(static)
    for (n = 0; n < my_buffer_len; n++)
        Node_buffer[n + header_len] += itmp;

    /* now we send this buffer around to assign global node index: */
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    Node_buffer[0] = min_id;
    Node_buffer[1] = max_id;
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        nodeID_0 = Node_buffer[0];
        nodeID_1 = Node_buffer[1];
        dof_0 = dof_distribution[buffer_rank];
        dof_1 = dof_distribution[buffer_rank + 1];
        if (nodeID_0 <= nodeID_1) {
#pragma omp for private(n,dof,id) schedule(static)
            for (n = 0; n < in->numNodes; n++) {
                dof = in->globalDegreesOfFreedom[n];
                id = in->Id[n] - nodeID_0;
                if ((dof_0 <= dof) && (dof < dof_1) && (id >= 0) && (id <= nodeID_1 - nodeID_0))
                    in->globalNodesIndex[n] = Node_buffer[id + header_len];
            }
        }
        if (p < in->MPIInfo->size - 1) { /* the last send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(Node_buffer, buffer_len + header_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
    }
    delete[] Node_buffer;
    return globalNumNodes;
}
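
/* Note on the two-entry header used above: Node_buffer[0] and Node_buffer[1]
 * travel around the ring together with the payload and carry the min_id and
 * max_id of the rank that built the buffer, so every receiver knows which
 * node-id window the circulating entries cover.  Worked illustration with
 * hypothetical ids: if a buffer arrives with Node_buffer[0] = 100 and
 * Node_buffer[1] = 104, then a local node with Id 102 whose DOF lies in the
 * sender's DOF slice reads its global node index from
 * Node_buffer[header_len + (102 - 100)].
 */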

dim_t Dudley_NodeFile_createDenseReducedNodeLabeling(Dudley_NodeFile* in, index_t* reducedNodeMask)
{
    index_t min_node, max_node, unset_node = -1, set_node = 1, node_0, node_1, *Nodes_buffer = NULL, k;
    int buffer_rank, *distribution = NULL;
    dim_t p, buffer_len, n, myNodes, *offsets = NULL, *loc_offsets = NULL, globalNumReducedNodes = 0, myNewNodes;
#ifdef ESYS_MPI
    int dest, source;
    MPI_Status status;
#endif

    /* get the global range of node ids */
    Dudley_NodeFile_setGlobalNodeIDIndexRange(&min_node, &max_node, in);

    distribution = new index_t[in->MPIInfo->size + 1];
    offsets = new dim_t[in->MPIInfo->size];
    loc_offsets = new dim_t[in->MPIInfo->size];

    /* distribute the range of node ids */
    buffer_len = in->MPIInfo->setDistribution(min_node, max_node, distribution);
    myNodes = distribution[in->MPIInfo->rank + 1] - distribution[in->MPIInfo->rank];
    /* allocate buffers */
    Nodes_buffer = new index_t[buffer_len];
    /* fill Nodes_buffer with the unset_node marker to check if nodes are defined */
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < buffer_len; n++)
        Nodes_buffer[n] = unset_node;

    /* fill the buffer by sending portions around in a circle */
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        if (p > 0) { /* the initial send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(Nodes_buffer, buffer_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
        node_0 = distribution[buffer_rank];
        node_1 = distribution[buffer_rank + 1];
#pragma omp parallel for private(n,k) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            if (reducedNodeMask[n] > -1) {
                k = in->globalNodesIndex[n];
                if ((node_0 <= k) && (k < node_1)) {
                    Nodes_buffer[k - node_0] = set_node;
                }
            }
        }
    }
    /* count the entries in the Nodes_buffer */
    /* TODO: OMP parallel */
    myNewNodes = 0;
    for (n = 0; n < myNodes; ++n) {
        if (Nodes_buffer[n] == set_node) {
            Nodes_buffer[n] = myNewNodes;
            myNewNodes++;
        }
    }
    memset(loc_offsets, 0, in->MPIInfo->size * sizeof(dim_t));
    loc_offsets[in->MPIInfo->rank] = myNewNodes;
#ifdef ESYS_MPI
    MPI_Allreduce(loc_offsets, offsets, in->MPIInfo->size, MPI_INT, MPI_SUM, in->MPIInfo->comm);
    globalNumReducedNodes = 0;
    for (n = 0; n < in->MPIInfo->size; ++n) {
        loc_offsets[n] = globalNumReducedNodes;
        globalNumReducedNodes += offsets[n];
    }
#else
    globalNumReducedNodes = loc_offsets[0];
    loc_offsets[0] = 0;
#endif
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < myNodes; ++n)
        Nodes_buffer[n] += loc_offsets[in->MPIInfo->rank];
    /* now entries are collected from the buffer again by sending the entries around in a circle */
#pragma omp parallel for private(n) schedule(static)
    for (n = 0; n < in->numNodes; ++n)
        in->globalReducedNodesIndex[n] = loc_offsets[0] - 1;
#ifdef ESYS_MPI
    dest = in->MPIInfo->mod_rank(in->MPIInfo->rank + 1);
    source = in->MPIInfo->mod_rank(in->MPIInfo->rank - 1);
#endif
    buffer_rank = in->MPIInfo->rank;
    for (p = 0; p < in->MPIInfo->size; ++p) {
        node_0 = distribution[buffer_rank];
        node_1 = distribution[buffer_rank + 1];
#pragma omp parallel for private(n,k) schedule(static)
        for (n = 0; n < in->numNodes; n++) {
            if (reducedNodeMask[n] > -1) {
                k = in->globalNodesIndex[n];
                if ((node_0 <= k) && (k < node_1))
                    in->globalReducedNodesIndex[n] = Nodes_buffer[k - node_0];
            }
        }
        if (p < in->MPIInfo->size - 1) { /* the last send can be skipped */
#ifdef ESYS_MPI
            MPI_Sendrecv_replace(Nodes_buffer, buffer_len, MPI_INT,
                                 dest, in->MPIInfo->counter(), source, in->MPIInfo->counter(),
                                 in->MPIInfo->comm, &status);
            in->MPIInfo->incCounter();
#endif
        }
        buffer_rank = in->MPIInfo->mod_rank(buffer_rank - 1);
    }
    delete[] Nodes_buffer;
    delete[] distribution;
    delete[] loc_offsets;
    delete[] offsets;
    return globalNumReducedNodes;
}

} // namespace dudley
