/trunk/finley/src/NodeFile.cpp

Revision 6620 - Mon Aug 14 08:31:59 2017 UTC by gross
File size: 40319 byte(s)
Log message: gather function fixed.

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2017 by The University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Apache License, version 2.0
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development 2012-2013 by School of Earth Sciences
13 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 *
15 *****************************************************************************/
16
17 #include "NodeFile.h"
18
19 #include <escript/Data.h>
20 #include <escript/index.h>
21
22 #include <limits>
23 #include <sstream>
24 #include <iostream>
25
26 namespace finley {
27
28 // helper function
29 static std::pair<index_t,index_t> getGlobalRange(dim_t n, const index_t* id,
30 escript::JMPI mpiInfo)
31 {
32 std::pair<index_t,index_t> result(util::getMinMaxInt(1, n, id));
33
34 #ifdef ESYS_MPI
35 index_t global_id_range[2];
36 index_t id_range[2] = { -result.first, result.second };
37 MPI_Allreduce(id_range, global_id_range, 2, MPI_DIM_T, MPI_MAX,
38 mpiInfo->comm);
39 result.first = -global_id_range[0];
40 result.second = global_id_range[1];
41 #endif
42 if (result.second < result.first) {
43 result.first = -1;
44 result.second = 0;
45 }
46 return result;
47 }
48
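/*
 * Illustrative sketch (names are placeholders, not part of this file): the
 * helper above obtains a global minimum and maximum with a single MPI_MAX
 * reduction by negating the minimum before the reduce:
 *
 *   long local[2]  = { -local_min, local_max };
 *   long global[2];
 *   MPI_Allreduce(local, global, 2, MPI_LONG, MPI_MAX, comm);
 *   const long global_min = -global[0];   // max of -min_i == -(min of min_i)
 *   const long global_max =  global[1];
 *
 * MPI_LONG merely stands in for the index type here; the code above uses
 * escript's MPI_DIM_T for index_t.
 */
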
49 // helper function
50 static void scatterEntries(dim_t n, const index_t* index, index_t min_index,
51 index_t max_index, index_t* Id_out,
52 const index_t* Id_in,
53 int* Tag_out, const int* Tag_in,
54 index_t* globalDegreesOfFreedom_out,
55 const index_t* globalDegreesOfFreedom_in,
56 int numDim, double* Coordinates_out,
57 const double* Coordinates_in)
58 {
59 const index_t range = max_index-min_index;
60 const size_t numDim_size = numDim*sizeof(double);
61
62 #pragma omp parallel for
63 for (index_t i=0; i<n; i++) {
64 const index_t k = index[i]-min_index;
65 if (k>=0 && k<range) {
66 Id_out[k] = Id_in[i];
67 Tag_out[k] = Tag_in[i];
68 globalDegreesOfFreedom_out[k] = globalDegreesOfFreedom_in[i];
69 memcpy(&Coordinates_out[INDEX2(0,k,numDim)],
70 &Coordinates_in[INDEX2(0,i,numDim)], numDim_size);
71 }
72 }
73 }
74
75 // helper function
76 static void gatherEntries(dim_t n, const index_t* index,
77 index_t min_index, index_t max_index,
78 index_t* Id_out, const index_t* Id_in,
79 int* Tag_out, const int* Tag_in,
80 index_t* globalDegreesOfFreedom_out,
81 const index_t* globalDegreesOfFreedom_in,
82 int numDim, double* Coordinates_out,
83 const double* Coordinates_in)
84 {
85 const index_t range = max_index-min_index;
86 const size_t numDim_size = numDim*sizeof(double);
87
88 #pragma omp parallel for
89 for (index_t i=0; i<n; i++) {
90 const index_t k = index[i]-min_index;
91 if (k>=0 && k<range) {
92 Id_out[i] = Id_in[k];
93 Tag_out[i] = Tag_in[k];
94 globalDegreesOfFreedom_out[i] = globalDegreesOfFreedom_in[k];
95 memcpy(&Coordinates_out[INDEX2(0,i,numDim)],
96 &Coordinates_in[INDEX2(0,k,numDim)], numDim_size);
97 }
98 }
99 }
100
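/*
 * Illustrative sketch (xOut/xIn are placeholder names): the two helpers
 * above are inverse index patterns,
 *
 *   scatterEntries:  out[index[i]] = in[i]
 *   gatherEntries:   out[i]        = in[index[i]]
 *
 * and whole coordinate tuples are copied per node because the storage is
 * node-major: INDEX2(i,n,numDim) = i + numDim*n, so the numDim components
 * of node n are contiguous. A minimal gather over plain doubles:
 *
 *   for (dim_t i = 0; i < n; i++) {
 *       const index_t k = index[i] - min_index;
 *       if (k >= 0 && k < max_index - min_index)
 *           memcpy(&xOut[i*numDim], &xIn[k*numDim], numDim*sizeof(double));
 *   }
 */
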
101 /// constructor
102 /// use NodeFile::allocTable to allocate the node table (Id,Coordinates)
103 NodeFile::NodeFile(int nDim, escript::JMPI mpiInfo) :
104 numNodes(0),
105 MPIInfo(mpiInfo),
106 numDim(nDim),
107 Id(NULL),
108 Tag(NULL),
109 globalDegreesOfFreedom(NULL),
110 Coordinates(NULL),
111 globalReducedDOFIndex(NULL),
112 globalReducedNodesIndex(NULL),
113 globalNodesIndex(NULL),
114 reducedNodesId(NULL),
115 degreesOfFreedomId(NULL),
116 reducedDegreesOfFreedomId(NULL),
117 status(FINLEY_INITIAL_STATUS)
118 {
119 }
120
121 NodeFile::~NodeFile()
122 {
123 freeTable();
124 }
125
126 void NodeFile::allocTable(dim_t NN)
127 {
128 if (numNodes > 0)
129 freeTable();
130
131 Id = new index_t[NN];
132 Coordinates = new escript::DataTypes::real_t[NN*numDim];
133 Tag = new int[NN];
134 globalDegreesOfFreedom = new index_t[NN];
135 globalReducedDOFIndex = new index_t[NN];
136 globalReducedNodesIndex = new index_t[NN];
137 globalNodesIndex = new index_t[NN];
138 reducedNodesId = new index_t[NN];
139 degreesOfFreedomId = new index_t[NN];
140 reducedDegreesOfFreedomId = new index_t[NN];
141 numNodes = NN;
142
143 // this initialization makes sure that data are located on the right
144 // processor
145 #pragma omp parallel for
146 for (index_t n=0; n<numNodes; n++) {
147 Id[n] = -1;
148 for (int i=0; i<numDim; i++)
149 Coordinates[INDEX2(i,n,numDim)] = 0.;
150 Tag[n] = -1;
151 globalDegreesOfFreedom[n] = -1;
152 globalReducedDOFIndex[n] = -1;
153 globalReducedNodesIndex[n] = -1;
154 globalNodesIndex[n] = -1;
155 reducedNodesId[n] = -1;
156 degreesOfFreedomId[n] = -1;
157 reducedDegreesOfFreedomId[n] = -1;
158 }
159 }
160
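/*
 * Note on the -1 initialisation in allocTable(): writing every entry from a
 * parallel loop is a first-touch placement pattern. On NUMA machines a page
 * is mapped to the memory of the thread that first writes it, so touching
 * the arrays with the same loop/schedule that later processes them keeps
 * node n's data close to the thread that works on node n:
 *
 *   #pragma omp parallel for        // same schedule as the compute loops
 *   for (index_t n = 0; n < numNodes; n++)
 *       Id[n] = -1;                 // the first write decides the placement
 */
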
161 void NodeFile::freeTable()
162 {
163 delete[] Id;
164 delete[] Coordinates;
165 delete[] globalDegreesOfFreedom;
166 delete[] globalReducedDOFIndex;
167 delete[] globalReducedNodesIndex;
168 delete[] globalNodesIndex;
169 delete[] Tag;
170 delete[] reducedNodesId;
171 delete[] degreesOfFreedomId;
172 delete[] reducedDegreesOfFreedomId;
173 tagsInUse.clear();
174 nodesMapping.clear();
175 reducedNodesMapping.clear();
176 degreesOfFreedomMapping.clear();
177 reducedDegreesOfFreedomMapping.clear();
178 nodesDistribution.reset();
179 reducedNodesDistribution.reset();
180 degreesOfFreedomDistribution.reset();
181 reducedDegreesOfFreedomDistribution.reset();
182 #ifdef ESYS_HAVE_PASO
183 degreesOfFreedomConnector.reset();
184 reducedDegreesOfFreedomConnector.reset();
185 #endif
186 #ifdef ESYS_HAVE_TRILINOS
187 trilinosRowMap.reset();
188 trilinosReducedRowMap.reset();
189 trilinosColMap.reset();
190 trilinosReducedColMap.reset();
191 #endif
192 numNodes = 0;
193 }
194
195 void NodeFile::print() const
196 {
197 std::cout << "=== " << numDim << "D-Nodes:\nnumber of nodes=" << numNodes
198 << std::endl;
199 std::cout << "Id,Tag,globalDegreesOfFreedom,degreesOfFreedom,reducedDegreesOfFeedom,node,reducedNode,Coordinates" << std::endl;
200 for (index_t i=0; i<numNodes; i++) {
201 std::cout << Id[i] << "," << Tag[i] << "," << globalDegreesOfFreedom[i]
202 << "," << degreesOfFreedomMapping.target[i]
203 << "," << reducedDegreesOfFreedomMapping.target[i]
204 << "," << nodesMapping.target[i] << reducedNodesMapping.target[i]
205 << " ";
206 std::cout.precision(15);
207 std::cout.setf(std::ios::scientific, std::ios::floatfield);
208 for (int j=0; j<numDim; j++)
209 std::cout << " " << Coordinates[INDEX2(j,i,numDim)];
210 std::cout << std::endl;
211 }
212 }
213
214 std::pair<index_t,index_t> NodeFile::getDOFRange() const
215 {
216 std::pair<index_t,index_t> result(util::getMinMaxInt(
217 1, numNodes, globalDegreesOfFreedom));
218 if (result.second < result.first) {
219 result.first = -1;
220 result.second = 0;
221 }
222 return result;
223 }
224
225 std::pair<index_t,index_t> NodeFile::getGlobalIdRange() const
226 {
227 return getGlobalRange(numNodes, Id, MPIInfo);
228 }
229
230 std::pair<index_t,index_t> NodeFile::getGlobalDOFRange() const
231 {
232 return getGlobalRange(numNodes, globalDegreesOfFreedom, MPIInfo);
233 }
234
235 std::pair<index_t,index_t> NodeFile::getGlobalNodeIDIndexRange() const
236 {
237 return getGlobalRange(numNodes, globalNodesIndex, MPIInfo);
238 }
239
240 /// copies the array newX into this->Coordinates
241 void NodeFile::setCoordinates(const escript::Data& newX)
242 {
243 if (newX.getDataPointSize() != numDim) {
244 std::stringstream ss;
245 ss << "NodeFile::setCoordinates: number of dimensions of new "
246 "coordinates has to be " << numDim;
247 throw escript::ValueError(ss.str());
248 } else if (newX.getNumDataPointsPerSample() != 1 ||
249 newX.getNumSamples() != numNodes) {
250 std::stringstream ss;
251 ss << "NodeFile::setCoordinates: number of given nodes must be "
252 << numNodes;
253 throw escript::ValueError(ss.str());
254 } else {
255 const size_t numDim_size = numDim * sizeof(double);
256 ++status;
257 #pragma omp parallel for
258 for (index_t n = 0; n < numNodes; n++) {
259 memcpy(&Coordinates[INDEX2(0, n, numDim)],
260 newX.getSampleDataRO(n), numDim_size);
261 }
262 }
263 }
264
265 /// sets tags to newTag where mask>0
266 void NodeFile::setTags(int newTag, const escript::Data& mask)
267 {
268 if (1 != mask.getDataPointSize()) {
269 throw escript::ValueError("NodeFile::setTags: number of components of mask must be 1.");
270 } else if (mask.getNumDataPointsPerSample() != 1 ||
271 mask.getNumSamples() != numNodes) {
272 throw escript::ValueError("NodeFile::setTags: illegal number of samples of mask Data object");
273 }
274
275 #pragma omp parallel for
276 for (index_t n = 0; n < numNodes; n++) {
277 if (mask.getSampleDataRO(n)[0] > 0)
278 Tag[n] = newTag;
279 }
280 updateTagList();
281 }
282
283
284 void NodeFile::copyTable(index_t offset, index_t idOffset, index_t dofOffset,
285 const NodeFile* in)
286 {
287 // check number of dimensions and table size
288 if (numDim != in->numDim) {
289 throw escript::ValueError("NodeFile::copyTable: dimensions of node files don't match");
290 }
291 if (numNodes < in->numNodes+offset) {
292 throw escript::ValueError("NodeFile::copyTable: node table is too small.");
293 }
294
295 #pragma omp parallel for
296 for (index_t n=0; n<in->numNodes; n++) {
297 Id[offset+n]=in->Id[n]+idOffset;
298 Tag[offset+n]=in->Tag[n];
299 globalDegreesOfFreedom[offset+n]=in->globalDegreesOfFreedom[n]+dofOffset;
300 for(int i=0; i<numDim; i++)
301 Coordinates[INDEX2(i, offset+n, numDim)] =
302 in->Coordinates[INDEX2(i, n, in->numDim)];
303 }
304 }
305
306 /// scatters the NodeFile in into this NodeFile using index[0:in->numNodes-1].
307 /// index has to be between 0 and numNodes-1.
308 /// colouring is chosen for the worst case
309 void NodeFile::scatter(const index_t* index, const NodeFile* in)
310 {
311 scatterEntries(numNodes, index, 0, in->numNodes, Id, in->Id, Tag, in->Tag,
312 globalDegreesOfFreedom, in->globalDegreesOfFreedom,
313 numDim, Coordinates, in->Coordinates);
314 }
315
316 /// gathers this NodeFile from the NodeFile 'in' using the entries in
317 /// index[0:numNodes-1], which have to be between 0 (inclusive) and
318 /// in->numNodes (exclusive)
319 // WARNING: This does not work with MPI!
320 void NodeFile::gather(const index_t* index, const NodeFile* in)
321 {
322 gatherEntries(numNodes, index, 0, in->getNumNodes(), Id, in->Id,
323 Tag, in->Tag, globalDegreesOfFreedom, in->globalDegreesOfFreedom,
324 numDim, Coordinates, in->Coordinates);
325
326 }
327
328 void NodeFile::gather_global(const index_t* index, const NodeFile* in)
329 {
330 // get the global range of node ids
331 const std::pair<index_t,index_t> id_range(in->getGlobalIdRange());
332 const index_t undefined_node = id_range.first-1;
333 std::vector<index_t> distribution(in->MPIInfo->size+1);
334
335 // distribute the range of node ids
336 index_t buffer_len = in->MPIInfo->setDistribution(id_range.first, id_range.second, &distribution[0]);
337
338 // allocate buffers
339 index_t* Id_buffer = new index_t[buffer_len];
340 int* Tag_buffer = new int[buffer_len];
341 index_t* globalDegreesOfFreedom_buffer = new index_t[buffer_len];
342 double* Coordinates_buffer = new double[buffer_len*numDim];
343
344 // fill Id_buffer with the undefined_node marker so we can later check
345 // whether all requested nodes have been defined
346 #pragma omp parallel for
347 for (index_t n = 0; n < buffer_len; n++)
348 Id_buffer[n] = undefined_node;
349
350 // fill the buffer by sending portions around in a circle
351 #ifdef ESYS_MPI
352 MPI_Status status;
353 int dest = in->MPIInfo->mod_rank(in->MPIInfo->rank+1);
354 int source = in->MPIInfo->mod_rank(in->MPIInfo->rank-1);
355 #endif
356 int buffer_rank = in->MPIInfo->rank;
357 for (int p=0; p<in->MPIInfo->size; ++p) {
358 if (p>0) { // the initial send can be skipped
359 #ifdef ESYS_MPI
360 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_DIM_T, dest,
361 in->MPIInfo->counter(), source,
362 in->MPIInfo->counter(), in->MPIInfo->comm, &status);
363 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
364 in->MPIInfo->counter()+1, source,
365 in->MPIInfo->counter()+1, in->MPIInfo->comm, &status);
366 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
367 MPI_DIM_T, dest, in->MPIInfo->counter()+2, source,
368 in->MPIInfo->counter()+2, in->MPIInfo->comm, &status);
369 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
370 MPI_DOUBLE, dest, in->MPIInfo->counter()+3, source,
371 in->MPIInfo->counter()+3, in->MPIInfo->comm, &status);
372 in->MPIInfo->incCounter(4);
373 #endif
374 }
375 buffer_rank=in->MPIInfo->mod_rank(buffer_rank-1);
376 scatterEntries(in->numNodes, in->Id, distribution[buffer_rank],
377 distribution[buffer_rank+1], Id_buffer, in->Id,
378 Tag_buffer, in->Tag, globalDegreesOfFreedom_buffer,
379 in->globalDegreesOfFreedom, numDim, Coordinates_buffer,
380 in->Coordinates);
381 }
382 // now entries are collected from the buffer again by sending the
383 // entries around in a circle
384 #ifdef ESYS_MPI
385 dest = in->MPIInfo->mod_rank(in->MPIInfo->rank+1);
386 source = in->MPIInfo->mod_rank(in->MPIInfo->rank-1);
387 #endif
388 buffer_rank=in->MPIInfo->rank;
389 for (int p=0; p<in->MPIInfo->size; ++p) {
390 gatherEntries(numNodes, index, distribution[buffer_rank],
391 distribution[buffer_rank+1], Id, Id_buffer, Tag, Tag_buffer,
392 globalDegreesOfFreedom, globalDegreesOfFreedom_buffer, numDim,
393 Coordinates, Coordinates_buffer);
394 if (p < in->MPIInfo->size-1) { // the last send can be skipped
395 #ifdef ESYS_MPI
396 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_DIM_T, dest,
397 in->MPIInfo->counter(), source,
398 in->MPIInfo->counter(), in->MPIInfo->comm, &status);
399 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
400 in->MPIInfo->counter()+1, source,
401 in->MPIInfo->counter()+1, in->MPIInfo->comm, &status);
402 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
403 MPI_DIM_T, dest, in->MPIInfo->counter()+2, source,
404 in->MPIInfo->counter()+2, in->MPIInfo->comm, &status);
405 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
406 MPI_DOUBLE, dest, in->MPIInfo->counter()+3, source,
407 in->MPIInfo->counter()+3, in->MPIInfo->comm, &status);
408 in->MPIInfo->incCounter(4);
409 #endif
410 }
411 buffer_rank=in->MPIInfo->mod_rank(buffer_rank-1);
412 }
413 #if DOASSERT
414 // check if all nodes are set:
415 index_t err=-1;
416 #pragma omp parallel for
417 for (index_t n=0; n<numNodes; ++n) {
418 if (Id[n] == undefined_node) {
419 #pragma omp critical
420 err=n;
421 }
422 }
423 if (err>=0) {
424 std::stringstream ss;
425 ss << "NodeFile::gather_global: Node id " << Id[err]
426 << " at position " << err << " is referenced but not defined.";
427 const std::string errorMsg(ss.str());
428 throw escript::AssertException(errorMsg);
429 }
430 #endif // DOASSERT
431 delete[] Id_buffer;
432 delete[] Tag_buffer;
433 delete[] globalDegreesOfFreedom_buffer;
434 delete[] Coordinates_buffer;
435 }
436
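/*
 * Illustrative sketch (buffer/len/tag are placeholders): gather_global()
 * circulates its buffers through all ranks in a ring. After size-1
 * exchanges every rank has seen every portion of the distributed data:
 *
 *   int dest   = (rank + 1) % size;          // mod_rank(rank+1)
 *   int source = (rank - 1 + size) % size;   // mod_rank(rank-1)
 *   for (int p = 1; p < size; ++p) {
 *       MPI_Sendrecv_replace(buffer, len, MPI_DOUBLE, dest, tag,
 *                            source, tag, comm, &status);
 *       // buffer now holds the portion originally owned by
 *       // rank (rank - p + size) % size
 *   }
 *
 * The MPIInfo->counter()/incCounter() bookkeeping keeps the tags of
 * successive rounds from matching each other's messages.
 */
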
437 void NodeFile::assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF,
438 const IndexVector& distribution)
439 {
440 int p_min = MPIInfo->size, p_max = -1;
441 // first we calculate the min and max DOF on this processor to reduce
442 // costs for searching
443 const std::pair<index_t,index_t> dofRange(getDOFRange());
444
445 for (int p = 0; p < MPIInfo->size; ++p) {
446 if (distribution[p] <= dofRange.first)
447 p_min = p;
448 if (distribution[p] <= dofRange.second)
449 p_max = p;
450 }
451 #pragma omp parallel for
452 for (index_t n = 0; n < numNodes; ++n) {
453 const index_t k = globalDegreesOfFreedom[n];
454 for (int p = p_min; p <= p_max; ++p) {
455 if (k < distribution[p + 1]) {
456 mpiRankOfDOF[n] = p;
457 break;
458 }
459 }
460 }
461 }
462
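/*
 * Worked example for the loop above: with 3 ranks and
 * distribution = { 0, 4, 9, 12 }, rank p owns DOFs
 * [distribution[p], distribution[p+1]). DOF 7 is assigned to rank 1 because
 * distribution[1] <= 7 < distribution[2]; the search only scans
 * p_min..p_max, the ranks that can possibly own a DOF of this NodeFile.
 */
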
463 dim_t NodeFile::prepareLabeling(const std::vector<short>& mask,
464 IndexVector& buffer,
465 IndexVector& distribution,
466 bool useNodes)
467 {
468 const index_t UNSET_ID=-1,SET_ID=1;
469
470 // get the global range of DOF/node ids
471 std::pair<index_t,index_t> idRange(useNodes ?
472 getGlobalNodeIDIndexRange() : getGlobalDOFRange());
473 const index_t* indexArray = (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
474 // distribute the range of node ids
475 distribution.assign(MPIInfo->size+1, 0);
476 int buffer_len = MPIInfo->setDistribution(idRange.first,
477 idRange.second, &distribution[0]);
478 const dim_t myCount = distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
479
480 // fill the buffer with the UNSET_ID marker to detect which ids are in use
481 buffer.assign(buffer_len, UNSET_ID);
482
483 // fill the buffer by sending portions around in a circle
484 #ifdef ESYS_MPI
485 MPI_Status status;
486 int dest = MPIInfo->mod_rank(MPIInfo->rank + 1);
487 int source = MPIInfo->mod_rank(MPIInfo->rank - 1);
488 #endif
489 int buffer_rank=MPIInfo->rank;
490 for (int p=0; p<MPIInfo->size; ++p) {
491 if (p>0) { // the initial send can be skipped
492 #ifdef ESYS_MPI
493 MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_DIM_T, dest,
494 MPIInfo->counter(), source, MPIInfo->counter(),
495 MPIInfo->comm, &status);
496 MPIInfo->incCounter();
497 #endif
498 }
499 buffer_rank = MPIInfo->mod_rank(buffer_rank-1);
500 const index_t id0 = distribution[buffer_rank];
501 const index_t id1 = distribution[buffer_rank+1];
502 #pragma omp parallel for
503 for (index_t n = 0; n < numNodes; n++) {
504 if (mask.size() < numNodes || mask[n] > -1) {
505 const index_t k = indexArray[n];
506 if (id0 <= k && k < id1) {
507 buffer[k - id0] = SET_ID;
508 }
509 }
510 }
511 }
512 // count the entries in the buffer
513 // TODO: OMP parallel
514 index_t myNewCount = 0;
515 for (index_t n = 0; n < myCount; ++n) {
516 if (buffer[n] == SET_ID) {
517 buffer[n] = myNewCount;
518 myNewCount++;
519 }
520 }
521 return myNewCount;
522 }
523
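/*
 * Illustrative example for the compaction loop at the end of
 * prepareLabeling(): entries marked SET_ID are renumbered consecutively,
 *
 *   before: buffer = { SET, UNSET, SET, SET, UNSET }
 *   after:  buffer = {   0, UNSET,   1,   2, UNSET },  myNewCount = 3
 *
 * so buffer[k - id0] becomes the rank-local index of global id k (a global
 * offset is added later by the callers).
 */
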
524 dim_t NodeFile::createDenseDOFLabeling()
525 {
526 std::vector<index_t> DOF_buffer;
527 std::vector<index_t> distribution;
528 std::vector<index_t> loc_offsets(MPIInfo->size);
529 std::vector<index_t> offsets(MPIInfo->size);
530 index_t new_numGlobalDOFs = 0;
531
532 // retrieve the number of own DOFs and fill buffer
533 loc_offsets[MPIInfo->rank] = prepareLabeling(std::vector<short>(),
534 DOF_buffer, distribution, false);
535 #ifdef ESYS_MPI
536 MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_DIM_T,
537 MPI_SUM, MPIInfo->comm);
538 for (int n=0; n<MPIInfo->size; ++n) {
539 loc_offsets[n]=new_numGlobalDOFs;
540 new_numGlobalDOFs+=offsets[n];
541 }
542 #else
543 new_numGlobalDOFs = loc_offsets[0];
544 loc_offsets[0] = 0;
545 #endif
546
547 const dim_t myDOFs = distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
548 #pragma omp parallel for
549 for (index_t n = 0; n < myDOFs; ++n)
550 DOF_buffer[n] += loc_offsets[MPIInfo->rank];
551
552 std::vector<unsigned char> set_new_DOF(numNodes, true);
553
554 // now entries are collected from the buffer again by sending them around
555 // in a circle
556 #ifdef ESYS_MPI
557 int dest = MPIInfo->mod_rank(MPIInfo->rank + 1);
558 int source = MPIInfo->mod_rank(MPIInfo->rank - 1);
559 #endif
560 int buffer_rank = MPIInfo->rank;
561 for (int p = 0; p < MPIInfo->size; ++p) {
562 const index_t dof0 = distribution[buffer_rank];
563 const index_t dof1 = distribution[buffer_rank+1];
564 #pragma omp parallel for
565 for (index_t n = 0; n < numNodes; n++) {
566 const index_t k = globalDegreesOfFreedom[n];
567 if (set_new_DOF[n] && dof0<=k && k<dof1) {
568 globalDegreesOfFreedom[n]=DOF_buffer[k-dof0];
569 set_new_DOF[n]=false;
570 }
571 }
572 if (p<MPIInfo->size-1) { // the last send can be skipped
573 #ifdef ESYS_MPI
574 MPI_Status status;
575 MPI_Sendrecv_replace(&DOF_buffer[0], DOF_buffer.size(), MPI_DIM_T,
576 dest, MPIInfo->counter(), source,
577 MPIInfo->counter(), MPIInfo->comm, &status);
578 MPIInfo->incCounter();
579 #endif
580 }
581 buffer_rank = MPIInfo->mod_rank(buffer_rank-1);
582 }
583
584 return new_numGlobalDOFs;
585 }
586
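/*
 * Illustrative sketch of the offset computation above: it is an exclusive
 * prefix sum over per-rank counts. Each rank contributes its count in its
 * own slot, MPI_SUM assembles the full count vector, and the scan turns
 * counts into starting offsets:
 *
 *   counts (after Allreduce):  { 5, 3, 4 }
 *   offsets (exclusive scan):  { 0, 5, 8 },  new_numGlobalDOFs = 12
 *
 * Rank r then shifts its locally numbered DOFs by offsets[r] to obtain the
 * globally dense numbering.
 */
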
587 dim_t NodeFile::createDenseNodeLabeling(IndexVector& nodeDistribution,
588 const IndexVector& dofDistribution)
589 {
590 const index_t UNSET_ID=-1, SET_ID=1;
591 const index_t myFirstDOF = dofDistribution[MPIInfo->rank];
592 const index_t myLastDOF = dofDistribution[MPIInfo->rank+1];
593
594 // find the range of node ids controlled by me
595 index_t min_id = std::numeric_limits<index_t>::max();
596 index_t max_id = std::numeric_limits<index_t>::min();
597 #pragma omp parallel
598 {
599 index_t loc_max_id = max_id;
600 index_t loc_min_id = min_id;
601 #pragma omp for
602 for (index_t n = 0; n < numNodes; n++) {
603 const dim_t dof = globalDegreesOfFreedom[n];
604 if (myFirstDOF <= dof && dof < myLastDOF) {
605 loc_max_id = std::max(loc_max_id, Id[n]);
606 loc_min_id = std::min(loc_min_id, Id[n]);
607 }
608 }
609 #pragma omp critical
610 {
611 max_id = std::max(loc_max_id, max_id);
612 min_id = std::min(loc_min_id, min_id);
613 }
614 }
615 index_t my_buffer_len = (max_id>=min_id ? max_id-min_id+1 : 0);
616 index_t buffer_len;
617
618 #ifdef ESYS_MPI
619 MPI_Allreduce(&my_buffer_len, &buffer_len, 1, MPI_DIM_T, MPI_MAX,
620 MPIInfo->comm);
621 #else
622 buffer_len=my_buffer_len;
623 #endif
624
625 const int header_len=2;
626 std::vector<index_t> Node_buffer(buffer_len+header_len, UNSET_ID);
627 // the two header entries carry the id range covered by this buffer
628 Node_buffer[0]=min_id;
629 Node_buffer[1]=max_id;
630
631 // mark and count the nodes in use
632 #pragma omp parallel for
633 for (index_t n = 0; n < numNodes; n++) {
634 globalNodesIndex[n] = -1;
635 const index_t dof = globalDegreesOfFreedom[n];
636 if (myFirstDOF <= dof && dof < myLastDOF)
637 Node_buffer[Id[n]-min_id+header_len] = SET_ID;
638 }
639 index_t myNewNumNodes = 0;
640 for (index_t n = 0; n < my_buffer_len; n++) {
641 if (Node_buffer[header_len+n] == SET_ID) {
642 Node_buffer[header_len+n] = myNewNumNodes;
643 myNewNumNodes++;
644 }
645 }
646 // make the local number of nodes globally available
647 #ifdef ESYS_MPI
648 MPI_Allgather(&myNewNumNodes, 1, MPI_DIM_T, &nodeDistribution[0], 1,
649 MPI_DIM_T, MPIInfo->comm);
650 #else
651 nodeDistribution[0] = myNewNumNodes;
652 #endif
653
654 dim_t globalNumNodes = 0;
655 for (int p = 0; p < MPIInfo->size; ++p) {
656 const dim_t itmp = nodeDistribution[p];
657 nodeDistribution[p] = globalNumNodes;
658 globalNumNodes += itmp;
659 }
660 nodeDistribution[MPIInfo->size] = globalNumNodes;
661
662 // offset node buffer
663 #pragma omp parallel for
664 for (index_t n = 0; n < my_buffer_len; n++)
665 Node_buffer[n+header_len] += nodeDistribution[MPIInfo->rank];
666
667 // now we send this buffer around to assign global node index
668 #ifdef ESYS_MPI
669 int dest = MPIInfo->mod_rank(MPIInfo->rank + 1);
670 int source = MPIInfo->mod_rank(MPIInfo->rank - 1);
671 #endif
672 int buffer_rank=MPIInfo->rank;
673 for (int p=0; p<MPIInfo->size; ++p) {
674 const index_t nodeID_0 = Node_buffer[0];
675 const index_t nodeID_1 = Node_buffer[1];
676 const index_t dof0 = dofDistribution[buffer_rank];
677 const index_t dof1 = dofDistribution[buffer_rank+1];
678 if (nodeID_0 <= nodeID_1) {
679 #pragma omp parallel for
680 for (index_t n = 0; n < numNodes; n++) {
681 const index_t dof = globalDegreesOfFreedom[n];
682 const index_t id = Id[n]-nodeID_0;
683 if (dof0 <= dof && dof < dof1 && id>=0 && id<=nodeID_1-nodeID_0)
684 globalNodesIndex[n] = Node_buffer[id+header_len];
685 }
686 }
687 if (p<MPIInfo->size-1) { // the last send can be skipped
688 #ifdef ESYS_MPI
689 MPI_Status status;
690 MPI_Sendrecv_replace(&Node_buffer[0], Node_buffer.size(), MPI_DIM_T,
691 dest, MPIInfo->counter(), source,
692 MPIInfo->counter(), MPIInfo->comm, &status);
693 MPIInfo->incCounter();
694 #endif
695 }
696 buffer_rank = MPIInfo->mod_rank(buffer_rank-1);
697 }
698 return globalNumNodes;
699 }
700
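/*
 * Illustrative sketch (dist/myCount/size/comm are placeholders): building a
 * distribution vector from local counts is the same count-to-offset pattern,
 * here with the counts collected via MPI_Allgather:
 *
 *   index_t myCount = ...;                     // entries numbered locally
 *   std::vector<index_t> dist(size + 1);
 *   MPI_Allgather(&myCount, 1, MPI_DIM_T, &dist[0], 1, MPI_DIM_T, comm);
 *   index_t total = 0;
 *   for (int p = 0; p < size; ++p) {           // exclusive scan in place
 *       const index_t c = dist[p];
 *       dist[p] = total;
 *       total += c;
 *   }
 *   dist[size] = total;    // rank p owns [dist[p], dist[p+1])
 */
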
701 dim_t NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
702 bool useNodes)
703 {
704 std::vector<index_t> buffer;
705 std::vector<index_t> distribution;
706 std::vector<index_t> loc_offsets(MPIInfo->size);
707 std::vector<index_t> offsets(MPIInfo->size);
708 dim_t new_numGlobalReduced=0;
709
710 // retrieve the number of own DOFs/nodes and fill buffer
711 loc_offsets[MPIInfo->rank]=prepareLabeling(reducedMask, buffer,
712 distribution, useNodes);
713 #ifdef ESYS_MPI
714 MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_DIM_T,
715 MPI_SUM, MPIInfo->comm);
716 for (int n=0; n<MPIInfo->size; ++n) {
717 loc_offsets[n]=new_numGlobalReduced;
718 new_numGlobalReduced+=offsets[n];
719 }
720 #else
721 new_numGlobalReduced=loc_offsets[0];
722 loc_offsets[0]=0;
723 #endif
724
725 const dim_t myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
726 #pragma omp parallel for
727 for (index_t n=0; n<myCount; ++n)
728 buffer[n]+=loc_offsets[MPIInfo->rank];
729
730 const index_t* denseArray =
731 (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
732 index_t* reducedArray =
733 (useNodes ? globalReducedNodesIndex : globalReducedDOFIndex);
734
735 #pragma omp parallel for
736 for (index_t n=0; n<numNodes; ++n)
737 reducedArray[n]=loc_offsets[0]-1;
738
739 // now entries are collected from the buffer by sending them around
740 // in a circle
741 #ifdef ESYS_MPI
742 int dest = MPIInfo->mod_rank(MPIInfo->rank + 1);
743 int source = MPIInfo->mod_rank(MPIInfo->rank - 1);
744 #endif
745 int buffer_rank=MPIInfo->rank;
746 for (int p=0; p<MPIInfo->size; ++p) {
747 const index_t id0=distribution[buffer_rank];
748 const index_t id1=distribution[buffer_rank+1];
749 #pragma omp parallel for
750 for (index_t n=0; n<numNodes; n++) {
751 if (reducedMask[n] > -1) {
752 const index_t k=denseArray[n];
753 if (id0<=k && k<id1)
754 reducedArray[n]=buffer[k-id0];
755 }
756 }
757 if (p<MPIInfo->size-1) { // the last send can be skipped
758 #ifdef ESYS_MPI
759 MPI_Status status;
760 MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_DIM_T, dest,
761 MPIInfo->counter(), source,
762 MPIInfo->counter(), MPIInfo->comm, &status);
763 MPIInfo->incCounter();
764 #endif
765 }
766 buffer_rank = MPIInfo->mod_rank(buffer_rank-1);
767 }
768 return new_numGlobalReduced;
769 }
770
771 void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
772 {
773 escript::Distribution_ptr dofDistribution;
774 const index_t* globalDOFIndex;
775 if (use_reduced_elements) {
776 dofDistribution = reducedDegreesOfFreedomDistribution;
777 globalDOFIndex = globalReducedDOFIndex;
778 } else {
779 dofDistribution = degreesOfFreedomDistribution;
780 globalDOFIndex = globalDegreesOfFreedom;
781 }
782 NodeMapping& mapping = (use_reduced_elements ?
783 reducedDegreesOfFreedomMapping : degreesOfFreedomMapping);
784
785 const index_t myFirstDOF = dofDistribution->getFirstComponent();
786 const index_t myLastDOF = dofDistribution->getLastComponent();
787 const int mpiSize = MPIInfo->size;
788 const int myRank = MPIInfo->rank;
789
790 index_t min_DOF, max_DOF;
791 std::pair<index_t,index_t> DOF_range(util::getFlaggedMinMaxInt(
792 numNodes, globalDOFIndex, -1));
793
794 if (DOF_range.second < DOF_range.first) {
795 min_DOF = myFirstDOF;
796 max_DOF = myLastDOF - 1;
797 } else {
798 min_DOF = DOF_range.first;
799 max_DOF = DOF_range.second;
800 }
801
802 int p_min = mpiSize;
803 int p_max = -1;
804 if (max_DOF >= min_DOF) {
805 for (int p = 0; p < mpiSize; ++p) {
806 if (dofDistribution->first_component[p] <= min_DOF)
807 p_min = p;
808 if (dofDistribution->first_component[p] <= max_DOF)
809 p_max = p;
810 }
811 }
812
813 std::stringstream ss;
814 if (myFirstDOF<myLastDOF && !(min_DOF <= myFirstDOF && myLastDOF-1 <= max_DOF)) {
815 ss << "createDOFMappingAndCoupling: Local elements do not span local "
816 "degrees of freedom. min_DOF=" << min_DOF << ", myFirstDOF="
817 << myFirstDOF << ", myLastDOF-1=" << myLastDOF-1
818 << ", max_DOF=" << max_DOF << " on rank=" << MPIInfo->rank;
819 }
820 const std::string msg(ss.str());
821 int error = msg.length();
822 int gerror = error;
823 escript::checkResult(error, gerror, MPIInfo);
824 if (gerror > 0) {
825 char* gmsg;
826 escript::shipString(msg.c_str(), &gmsg, MPIInfo->comm);
827 throw FinleyException(gmsg);
828 }
829
830 const index_t UNUSED = -1;
831 const dim_t len_loc_dof = max_DOF - min_DOF + 1;
832 std::vector<index_t> shared(numNodes * (p_max - p_min + 1));
833 std::vector<index_t> locDOFMask(len_loc_dof, UNUSED);
834
835 #ifdef BOUNDS_CHECK
836 ESYS_ASSERT(myLastDOF-min_DOF <= len_loc_dof, "BOUNDS_CHECK");
837 #endif
838
839 #pragma omp parallel
840 {
841 #pragma omp for
842 for (index_t i = 0; i < numNodes; ++i) {
843 const index_t k = globalDOFIndex[i];
844 if (k > -1) {
845 #ifdef BOUNDS_CHECK
846 ESYS_ASSERT(k - min_DOF < len_loc_dof, "BOUNDS_CHECK");
847 #endif
848 locDOFMask[k - min_DOF] = UNUSED - 1;
849 }
850 }
851 #pragma omp for
852 for (index_t i = myFirstDOF - min_DOF; i < myLastDOF - min_DOF; ++i) {
853 locDOFMask[i] = i - myFirstDOF + min_DOF;
854 }
855 }
856
857 std::vector<index_t> wanted_DOFs(numNodes);
858 std::vector<index_t> rcv_len(mpiSize);
859 std::vector<index_t> snd_len(mpiSize);
860 std::vector<int> neighbour;
861 std::vector<index_t> offsetInShared;
862 dim_t n = 0;
863 dim_t lastn = n;
864
865 for (int p = p_min; p <= p_max; ++p) {
866 if (p != myRank) {
867 const index_t firstDOF = std::max(min_DOF, dofDistribution->first_component[p]);
868 const index_t lastDOF = std::min(max_DOF + 1, dofDistribution->first_component[p + 1]);
869 #ifdef BOUNDS_CHECK
870 ESYS_ASSERT(lastDOF - min_DOF <= len_loc_dof, "BOUNDS_CHECK");
871 #endif
872 for (index_t i = firstDOF - min_DOF; i < lastDOF - min_DOF; ++i) {
873 if (locDOFMask[i] == UNUSED - 1) {
874 locDOFMask[i] = myLastDOF - myFirstDOF + n;
875 wanted_DOFs[n] = i + min_DOF;
876 ++n;
877 }
878 }
879 if (n > lastn) {
880 rcv_len[p] = n - lastn;
881 neighbour.push_back(p);
882 offsetInShared.push_back(lastn);
883 lastn = n;
884 }
885 } // if p!=myRank
886 } // for p
887
888 offsetInShared.push_back(lastn);
889
890 // assign new DOF labels to nodes
891 std::vector<index_t> nodeMask(numNodes, UNUSED);
892 #pragma omp parallel for
893 for (index_t i = 0; i < numNodes; ++i) {
894 const index_t k = globalDOFIndex[i];
895 if (k > -1)
896 nodeMask[i] = locDOFMask[k - min_DOF];
897 }
898
899 // now we can set the mapping from nodes to local DOFs
900 mapping.assign(nodeMask, UNUSED);
901
902 // define how to get DOF values that are controlled by other processors
903 #ifdef BOUNDS_CHECK
904 ESYS_ASSERT(numNodes == 0 || offsetInShared.back() < numNodes * (p_max - p_min + 1), "BOUNDS_CHECK");
905 #endif
906 #pragma omp parallel for
907 for (index_t i = 0; i < lastn; ++i)
908 shared[i] = myLastDOF - myFirstDOF + i;
909
910 #ifdef ESYS_HAVE_PASO
911 index_t* p = shared.empty() ? NULL : &shared[0];
912 paso::SharedComponents_ptr rcv_shcomp(new paso::SharedComponents(
913 myLastDOF - myFirstDOF, neighbour, p, offsetInShared));
914 #endif
915
916 /////////////////////////////////
917 // now we build the sender //
918 /////////////////////////////////
919 #ifdef ESYS_MPI
920 std::vector<MPI_Request> mpi_requests(mpiSize * 2);
921 std::vector<MPI_Status> mpi_stati(mpiSize * 2);
922 MPI_Alltoall(&rcv_len[0], 1, MPI_DIM_T, &snd_len[0], 1, MPI_DIM_T, MPIInfo->comm);
923 int count = 0;
924 for (int p = 0; p < neighbour.size(); p++) {
925 MPI_Isend(&wanted_DOFs[offsetInShared[p]],
926 offsetInShared[p+1] - offsetInShared[p],
927 MPI_DIM_T, neighbour[p], MPIInfo->counter() + myRank,
928 MPIInfo->comm, &mpi_requests[count]);
929 count++;
930 }
931 n = 0;
932 neighbour.clear();
933 offsetInShared.clear();
934 for (int p = 0; p < mpiSize; p++) {
935 if (snd_len[p] > 0) {
936 MPI_Irecv(&shared[n], snd_len[p], MPI_DIM_T, p,
937 MPIInfo->counter()+p, MPIInfo->comm,
938 &mpi_requests[count]);
939 count++;
940 neighbour.push_back(p);
941 offsetInShared.push_back(n);
942 n += snd_len[p];
943 }
944 }
945 MPIInfo->incCounter(MPIInfo->size);
946 MPI_Waitall(count, &mpi_requests[0], &mpi_stati[0]);
947 offsetInShared.push_back(n);
948
949 // map global IDs to local IDs
950 #pragma omp parallel for
951 for (index_t i = 0; i < n; ++i) {
952 shared[i] = locDOFMask[shared[i] - min_DOF];
953 }
954 #endif // ESYS_MPI
955
956 #ifdef ESYS_HAVE_PASO
957 paso::SharedComponents_ptr snd_shcomp(new paso::SharedComponents(
958 myLastDOF - myFirstDOF, neighbour, p, offsetInShared));
959
960 if (use_reduced_elements) {
961 reducedDegreesOfFreedomConnector.reset(new paso::Connector(snd_shcomp, rcv_shcomp));
962 } else {
963 degreesOfFreedomConnector.reset(new paso::Connector(snd_shcomp, rcv_shcomp));
964 }
965 #endif // ESYS_HAVE_PASO
966
967 #ifdef ESYS_HAVE_TRILINOS
968 using namespace esys_trilinos;
969
970 const dim_t myNumTargets = myLastDOF - myFirstDOF;
971 const dim_t numTargets = mapping.getNumTargets();
972 IndexVector myRows(myNumTargets);
973 IndexVector columns(numTargets);
974 const IndexVector& dofMap = mapping.map;
975
976 #pragma omp parallel
977 {
978 #pragma omp for nowait
979 for (size_t i = 0; i < myNumTargets; i++) {
980 myRows[i] = globalDOFIndex[dofMap[i]];
981 }
982 #pragma omp for
983 for (size_t i = 0; i < numTargets; i++) {
984 columns[i] = globalDOFIndex[dofMap[i]];
985 }
986 } // end parallel section
987
988 const dim_t numTotal = dofDistribution->getGlobalNumComponents();
989 if (use_reduced_elements) {
990 trilinosReducedRowMap.reset(new MapType(numTotal, myRows, 0,
991 TeuchosCommFromEsysComm(MPIInfo->comm)));
992 trilinosReducedColMap.reset(new MapType(numTotal, columns, 0,
993 TeuchosCommFromEsysComm(MPIInfo->comm)));
994 } else {
995 trilinosRowMap.reset(new MapType(numTotal, myRows, 0,
996 TeuchosCommFromEsysComm(MPIInfo->comm)));
997 trilinosColMap.reset(new MapType(numTotal, columns, 0,
998 TeuchosCommFromEsysComm(MPIInfo->comm)));
999 }
1000 #endif // ESYS_HAVE_TRILINOS
1001 }
1002
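/*
 * Summary of the local DOF numbering produced above (illustrative;
 * myNumDOF = myLastDOF - myFirstDOF): owned DOFs come first, referenced
 * remote ("ghost") DOFs are appended, grouped by owning rank:
 *
 *   owned DOFs  myFirstDOF .. myLastDOF-1        -> local 0 .. myNumDOF-1
 *   ghosts wanted from rank p_min, p_min+1, ...  -> local myNumDOF, myNumDOF+1, ...
 *
 * wanted_DOFs records the global indices of the ghosts; exchanging it
 * (MPI_Alltoall for the lengths, then Isend/Irecv) tells every rank which of
 * its own DOFs it has to send. Those send/receive lists are what the paso
 * Connector is built from, while the Trilinos row/column maps are derived
 * from the same node-to-local-DOF mapping.
 */
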
1003 void NodeFile::createNodeMappings(const IndexVector& indexReducedNodes,
1004 const IndexVector& dofDist,
1005 const IndexVector& nodeDist)
1006 {
1007 const int mpiSize = MPIInfo->size;
1008 const int myRank = MPIInfo->rank;
1009
1010 const index_t myFirstDOF = dofDist[myRank];
1011 const index_t myLastDOF = dofDist[myRank+1];
1012 const index_t myNumDOF = myLastDOF-myFirstDOF;
1013
1014 const index_t myFirstNode = nodeDist[myRank];
1015 const index_t myLastNode = nodeDist[myRank+1];
1016 const index_t myNumNodes = myLastNode-myFirstNode;
1017
1018 std::vector<short> maskMyReducedDOF(myNumDOF, -1);
1019 std::vector<short> maskMyReducedNodes(myNumNodes, -1);
1020 const index_t iRNsize = indexReducedNodes.size();
1021
1022 // mark the nodes used by the reduced mesh
1023 #pragma omp parallel for
1024 for (index_t i = 0; i < iRNsize; ++i) {
1025 index_t k = globalNodesIndex[indexReducedNodes[i]];
1026 if (k >= myFirstNode && myLastNode > k)
1027 maskMyReducedNodes[k - myFirstNode] = 1;
1028 k = globalDegreesOfFreedom[indexReducedNodes[i]];
1029 if (k >= myFirstDOF && myLastDOF > k) {
1030 maskMyReducedDOF[k - myFirstDOF] = 1;
1031 }
1032 }
1033 IndexVector indexMyReducedDOF = util::packMask(maskMyReducedDOF);
1034 index_t myNumReducedDOF = indexMyReducedDOF.size();
1035 IndexVector indexMyReducedNodes = util::packMask(maskMyReducedNodes);
1036 index_t myNumReducedNodes = indexMyReducedNodes.size();
1037
1038 IndexVector rdofDist(mpiSize+1);
1039 IndexVector rnodeDist(mpiSize+1);
1040 #ifdef ESYS_MPI
1041 MPI_Allgather(&myNumReducedNodes, 1, MPI_DIM_T, &rnodeDist[0], 1, MPI_DIM_T, MPIInfo->comm);
1042 MPI_Allgather(&myNumReducedDOF, 1, MPI_DIM_T, &rdofDist[0], 1, MPI_DIM_T, MPIInfo->comm);
1043 #else
1044 rnodeDist[0] = myNumReducedNodes;
1045 rdofDist[0] = myNumReducedDOF;
1046 #endif
1047 index_t globalNumReducedNodes = 0;
1048 index_t globalNumReducedDOF = 0;
1049 for (int i = 0; i < mpiSize; ++i) {
1050 index_t k = rnodeDist[i];
1051 rnodeDist[i] = globalNumReducedNodes;
1052 globalNumReducedNodes += k;
1053
1054 k = rdofDist[i];
1055 rdofDist[i] = globalNumReducedDOF;
1056 globalNumReducedDOF += k;
1057 }
1058 rnodeDist[mpiSize] = globalNumReducedNodes;
1059 rdofDist[mpiSize] = globalNumReducedDOF;
1060
1061 // ==== distribution of Nodes ====
1062 nodesDistribution.reset(new escript::Distribution(MPIInfo, nodeDist));
1063
1064 // ==== distribution of DOFs ====
1065 degreesOfFreedomDistribution.reset(new escript::Distribution(MPIInfo, dofDist));
1066
1067 // ==== distribution of reduced Nodes ====
1068 reducedNodesDistribution.reset(new escript::Distribution(MPIInfo, rnodeDist));
1069
1070 // ==== distribution of reduced DOF ====
1071 reducedDegreesOfFreedomDistribution.reset(new escript::Distribution(
1072 MPIInfo, rdofDist));
1073
1074 IndexVector nodeMask(numNodes);
1075 const index_t UNUSED = -1;
1076
1077 // ==== nodes mapping (dummy) ====
1078 #pragma omp parallel for
1079 for (index_t i = 0; i < numNodes; ++i)
1080 nodeMask[i] = i;
1081 nodesMapping.assign(nodeMask, UNUSED);
1082
1083 // ==== mapping between nodes and reduced nodes ====
1084 #pragma omp parallel for
1085 for (index_t i = 0; i < numNodes; ++i)
1086 nodeMask[i] = UNUSED;
1087 #pragma omp parallel for
1088 for (index_t i = 0; i < iRNsize; ++i)
1089 nodeMask[indexReducedNodes[i]] = i;
1090 reducedNodesMapping.assign(nodeMask, UNUSED);
1091
1092 // ==== mapping between nodes and DOFs + DOF connector
1093 createDOFMappingAndCoupling(false);
1094 // ==== mapping between nodes and reduced DOFs + reduced DOF connector
1095 createDOFMappingAndCoupling(true);
1096
1097 // get the Ids for reduced nodes, DOFs and reduced DOFs
1098 const index_t rnTargets = reducedNodesMapping.getNumTargets();
1099 const index_t dofTargets = degreesOfFreedomMapping.getNumTargets();
1100 const index_t rdofTargets = reducedDegreesOfFreedomMapping.getNumTargets();
1101 #pragma omp parallel
1102 {
1103 #pragma omp for nowait
1104 for (index_t i = 0; i < rnTargets; ++i)
1105 reducedNodesId[i] = Id[reducedNodesMapping.map[i]];
1106 #pragma omp for nowait
1107 for (index_t i = 0; i < dofTargets; ++i)
1108 degreesOfFreedomId[i] = Id[degreesOfFreedomMapping.map[i]];
1109 #pragma omp for
1110 for (index_t i = 0; i < rdofTargets; ++i)
1111 reducedDegreesOfFreedomId[i] = Id[reducedDegreesOfFreedomMapping.map[i]];
1112 }
1113 }
1114
1115 } // namespace finley
1116
