
Contents of /trunk/finley/src/NodeFile.cpp



Revision 4626
Wed Jan 22 06:07:34 2014 UTC by caltinay
File size: 40732 byte(s)
Eliminated all const_cast<Data*> hacks in ripley and finley now that
Data.getSampleDataRO returns a const pointer.
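
For context, a minimal, self-contained sketch of the pattern this change enables; Sample and copySample are illustrative stand-ins, not escript API. Once the read-only accessor is const-correct (as Data.getSampleDataRO now is), callers such as NodeFile::setCoordinates below can read sample data through a const reference without any const_cast:

#include <cstring>

// Stand-in for a Data-like container with a const-correct read-only
// accessor (three values per sample in this toy example).
struct Sample {
    const double* getSampleDataRO(int n) const { return &values[3*n]; }
    double values[6];
};

// Previously a non-const accessor forced callers to write something like
// const_cast<Sample*>(&s)->getSampleData(n) just to read; with a const
// accessor a plain const reference is enough.
void copySample(const Sample& s, int n, double* dest) {
    std::memcpy(dest, s.getSampleDataRO(n), 3*sizeof(double));
}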

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2013 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15
16
17 /****************************************************************************
18
19 Finley: NodeFile
20
21 *****************************************************************************/
22
23 #include "NodeFile.h"
24 #include <escript/Data.h>
25 #include <paso/Coupler.h>
26
27 #include <limits>
28 #include <sstream>
29
30 namespace finley {
31
32 // helper function
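// scatterEntries copies entry i of the *_in arrays to position
// index[i]-min_index of the *_out arrays, skipping any index that falls
// outside [min_index, max_index).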
33 static void scatterEntries(int n, int* index, int min_index, int max_index,
34 int* Id_out, int* Id_in, int* Tag_out, int* Tag_in,
35 int* globalDegreesOfFreedom_out,
36 int* globalDegreesOfFreedom_in,
37 int numDim, double* Coordinates_out,
38 double* Coordinates_in)
39 {
40 const int range = max_index-min_index;
41 const size_t numDim_size = numDim*sizeof(double);
42
43 #pragma omp parallel for
44 for (int i=0; i<n; i++) {
45 const int k=index[i]-min_index;
46 if ((k>=0) && (k<range)) {
47 Id_out[k]=Id_in[i];
48 Tag_out[k]=Tag_in[i];
49 globalDegreesOfFreedom_out[k]=globalDegreesOfFreedom_in[i];
50 memcpy(&(Coordinates_out[INDEX2(0,k,numDim)]),
51 &(Coordinates_in[INDEX2(0,i,numDim)]), numDim_size);
52 }
53 }
54 }
55
56 // helper function
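// gatherEntries is the inverse operation: entry i of the *_out arrays is
// filled from position index[i]-min_index of the *_in arrays, again
// skipping indices outside [min_index, max_index).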
57 static void gatherEntries(int n, const int* index, int min_index, int max_index,
58 int* Id_out, int* Id_in, int* Tag_out, int* Tag_in,
59 int* globalDegreesOfFreedom_out,
60 int* globalDegreesOfFreedom_in,
61 int numDim, double* Coordinates_out,
62 double* Coordinates_in)
63 {
64 const int range = max_index-min_index;
65 const size_t numDim_size = numDim*sizeof(double);
66
67 #pragma omp parallel for
68 for (int i=0; i<n; i++) {
69 const int k=index[i]-min_index;
70 if ((k>=0) && (k<range)) {
71 Id_out[i]=Id_in[k];
72 Tag_out[i]=Tag_in[k];
73 globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
74 memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]),
75 &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
76 }
77 }
78 }
79
80 /// constructor
81 /// use NodeFile::allocTable to allocate the node table (Id,Coordinates)
82 NodeFile::NodeFile(int nDim, Esys_MPIInfo *mpiInfo) :
83 numNodes(0),
84 numDim(nDim),
85 Id(NULL),
86 Tag(NULL),
87 globalDegreesOfFreedom(NULL),
88 Coordinates(NULL),
89 globalReducedDOFIndex(NULL),
90 globalReducedNodesIndex(NULL),
91 globalNodesIndex(NULL),
92 nodesDistribution(NULL),
93 reducedNodesDistribution(NULL),
94 degreesOfFreedomDistribution(NULL),
95 reducedDegreesOfFreedomDistribution(NULL),
96 degreesOfFreedomConnector(NULL),
97 reducedDegreesOfFreedomConnector(NULL),
98 reducedNodesId(NULL),
99 degreesOfFreedomId(NULL),
100 reducedDegreesOfFreedomId(NULL),
101 status(FINLEY_INITIAL_STATUS)
102 {
103 MPIInfo = Esys_MPIInfo_getReference(mpiInfo);
104 }
105
106 /// destructor
107 NodeFile::~NodeFile()
108 {
109 freeTable();
110 Esys_MPIInfo_free(MPIInfo);
111 }
112
113 /// allocates the node table within this node file to hold NN nodes.
114 void NodeFile::allocTable(int NN)
115 {
116 if (numNodes>0)
117 freeTable();
118
119 Id=new int[NN];
120 Coordinates=new double[NN*numDim];
121 Tag=new int[NN];
122 globalDegreesOfFreedom=new int[NN];
123 globalReducedDOFIndex=new int[NN];
124 globalReducedNodesIndex=new int[NN];
125 globalNodesIndex=new int[NN];
126 reducedNodesId=new int[NN];
127 degreesOfFreedomId=new int[NN];
128 reducedDegreesOfFreedomId=new int[NN];
129 numNodes=NN;
130
131 // this initialization makes sure that data are located on the right
132 // processor
133 #pragma omp parallel for
134 for (int n=0; n<numNodes; n++) {
135 Id[n]=-1;
136 for (int i=0; i<numDim; i++)
137 Coordinates[INDEX2(i,n,numDim)]=0.;
138 Tag[n]=-1;
139 globalDegreesOfFreedom[n]=-1;
140 globalReducedDOFIndex[n]=-1;
141 globalReducedNodesIndex[n]=-1;
142 globalNodesIndex[n]=-1;
143 reducedNodesId[n]=-1;
144 degreesOfFreedomId[n]=-1;
145 reducedDegreesOfFreedomId[n]=-1;
146 }
147 }
148
149 /// frees the node table within this node file
150 void NodeFile::freeTable()
151 {
152 delete[] Id;
153 delete[] Coordinates;
154 delete[] globalDegreesOfFreedom;
155 delete[] globalReducedDOFIndex;
156 delete[] globalReducedNodesIndex;
157 delete[] globalNodesIndex;
158 delete[] Tag;
159 delete[] reducedNodesId;
160 delete[] degreesOfFreedomId;
161 delete[] reducedDegreesOfFreedomId;
162 tagsInUse.clear();
163 nodesMapping.clear();
164 reducedNodesMapping.clear();
165 degreesOfFreedomMapping.clear();
166 reducedDegreesOfFreedomMapping.clear();
167 Paso_Distribution_free(nodesDistribution);
168 nodesDistribution=NULL;
169 Paso_Distribution_free(reducedNodesDistribution);
170 reducedNodesDistribution=NULL;
171 Paso_Distribution_free(degreesOfFreedomDistribution);
172 degreesOfFreedomDistribution=NULL;
173 Paso_Distribution_free(reducedDegreesOfFreedomDistribution);
174 reducedDegreesOfFreedomDistribution=NULL;
175 Paso_Connector_free(degreesOfFreedomConnector);
176 degreesOfFreedomConnector=NULL;
177 Paso_Connector_free(reducedDegreesOfFreedomConnector);
178 reducedDegreesOfFreedomConnector=NULL;
179
180 numNodes=0;
181 }
182
183 void NodeFile::print() const
184 {
185 std::cout << "=== " << numDim << "D-Nodes:\nnumber of nodes=" << numNodes
186 << std::endl;
187 std::cout << "Id,Tag,globalDegreesOfFreedom,degreesOfFreedom,reducedDegreesOfFreedom,node,reducedNode,Coordinates" << std::endl;
188 for (int i=0; i<numNodes; i++) {
189 std::cout << Id[i] << "," << Tag[i] << "," << globalDegreesOfFreedom[i]
190 << "," << degreesOfFreedomMapping.target[i]
191 << "," << reducedDegreesOfFreedomMapping.target[i]
192 << "," << nodesMapping.target[i] << reducedNodesMapping.target[i]
193 << " ";
194 std::cout.precision(15);
195 std::cout.setf(std::ios::scientific, std::ios::floatfield);
196 for (int j=0; j<numDim; j++)
197 std::cout << " " << Coordinates[INDEX2(j,i,numDim)];
198 std::cout << std::endl;
199 }
200 }
201
202 /// copies the array newX into this->coordinates
203 void NodeFile::setCoordinates(const escript::Data& newX)
204 {
205 if (newX.getDataPointSize() != numDim) {
206 std::stringstream ss;
207 ss << "NodeFile::setCoordinates: number of dimensions of new "
208 "coordinates has to be " << numDim;
209 const std::string errorMsg(ss.str());
210 setError(VALUE_ERROR, errorMsg.c_str());
211 } else if (newX.getNumDataPointsPerSample() != 1 ||
212 newX.getNumSamples() != numNodes) {
213 std::stringstream ss;
214 ss << "NodeFile::setCoordinates: number of given nodes must be "
215 << numNodes;
216 const std::string errorMsg(ss.str());
217 setError(VALUE_ERROR, errorMsg.c_str());
218 } else {
219 const size_t numDim_size=numDim*sizeof(double);
220 ++status;
221 #pragma omp parallel for
222 for (int n=0; n<numNodes; n++) {
223 memcpy(&(Coordinates[INDEX2(0,n,numDim)]), newX.getSampleDataRO(n), numDim_size);
224 }
225 }
226 }
227
228 /// sets tags to newTag where mask>0
229 void NodeFile::setTags(const int newTag, const escript::Data& mask)
230 {
231 resetError();
232
233 if (1 != mask.getDataPointSize()) {
234 setError(TYPE_ERROR, "NodeFile::setTags: number of components of mask must be 1.");
235 return;
236 } else if (mask.getNumDataPointsPerSample() != 1 ||
237 mask.getNumSamples() != numNodes) {
238 setError(TYPE_ERROR, "NodeFile::setTags: illegal number of samples of mask Data object");
239 return;
240 }
241
242 #pragma omp parallel for
243 for (int n=0; n<numNodes; n++) {
244 if (mask.getSampleDataRO(n)[0] > 0)
245 Tag[n]=newTag;
246 }
247 updateTagList();
248 }
249
250 std::pair<int,int> NodeFile::getDOFRange() const
251 {
252 std::pair<int,int> result(util::getMinMaxInt(
253 1, numNodes, globalDegreesOfFreedom));
254 if (result.second < result.first) {
255 result.first = -1;
256 result.second = 0;
257 }
258 return result;
259 }
260
261 std::pair<int,int> NodeFile::getGlobalIdRange() const
262 {
263 std::pair<int,int> result(util::getMinMaxInt(1, numNodes, Id));
264
265 #ifdef ESYS_MPI
266 int global_id_range[2];
267 int id_range[2] = { -result.first, result.second };
268 MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
269 result.first = -global_id_range[0];
270 result.second = global_id_range[1];
271 #endif
272 if (result.second < result.first) {
273 result.first = -1;
274 result.second = 0;
275 }
276 return result;
277 }
278
279 std::pair<int,int> NodeFile::getGlobalDOFRange() const
280 {
281 std::pair<int,int> result(util::getMinMaxInt(
282 1, numNodes, globalDegreesOfFreedom));
283
284 #ifdef ESYS_MPI
285 int global_id_range[2];
286 int id_range[2] = { -result.first, result.second };
287 MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
288 result.first = -global_id_range[0];
289 result.second = global_id_range[1];
290 #endif
291 if (result.second < result.first) {
292 result.first = -1;
293 result.second = 0;
294 }
295 return result;
296 }
297
298 std::pair<int,int> NodeFile::getGlobalNodeIDIndexRange() const
299 {
300 std::pair<int,int> result(util::getMinMaxInt(1, numNodes, globalNodesIndex));
301
302 #ifdef ESYS_MPI
303 int global_id_range[2];
304 int id_range[2] = { -result.first, result.second };
305 MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
306 result.first = -global_id_range[0];
307 result.second = global_id_range[1];
308 #endif
309 if (result.second < result.first) {
310 result.first = -1;
311 result.second = 0;
312 }
313 return result;
314 }
315
316 void NodeFile::copyTable(int offset, int idOffset, int dofOffset,
317 const NodeFile* in)
318 {
319 // check number of dimensions and table size
320 if (numDim != in->numDim) {
321 setError(TYPE_ERROR, "NodeFile::copyTable: dimensions of node files don't match");
322 return;
323 }
324 if (numNodes < in->numNodes+offset) {
325 setError(MEMORY_ERROR, "NodeFile::copyTable: node table is too small.");
326 return;
327 }
328
329 #pragma omp parallel for
330 for (int n=0; n<in->numNodes; n++) {
331 Id[offset+n]=in->Id[n]+idOffset;
332 Tag[offset+n]=in->Tag[n];
333 globalDegreesOfFreedom[offset+n]=in->globalDegreesOfFreedom[n]+dofOffset;
334 for(int i=0; i<numDim; i++)
335 Coordinates[INDEX2(i, offset+n, numDim)] =
336 in->Coordinates[INDEX2(i, n, in->numDim)];
337 }
338 }
339
340 /// scatters the NodeFile 'in' into this NodeFile using index[0:in->numNodes-1].
341 /// index has to be between 0 and numNodes-1.
342 /// colouring is chosen for the worst case
343 void NodeFile::scatter(int* index, const NodeFile* in)
344 {
345 scatterEntries(numNodes, index, 0, in->numNodes, Id, in->Id, Tag, in->Tag,
346 globalDegreesOfFreedom, in->globalDegreesOfFreedom,
347 numDim, Coordinates, in->Coordinates);
348 }
349
350 /// gathers this NodeFile from the NodeFile 'in' using the entries in
351 /// index[0:numNodes-1] which are between min_index and max_index
352 /// (exclusive)
353 void NodeFile::gather(int* index, const NodeFile* in)
354 {
355 const std::pair<int,int> id_range(in->getGlobalIdRange());
356 gatherEntries(numNodes, index, id_range.first, id_range.second, Id, in->Id,
357 Tag, in->Tag, globalDegreesOfFreedom, in->globalDegreesOfFreedom,
358 numDim, Coordinates, in->Coordinates);
359 }
360
361 void NodeFile::gather_global(const std::vector<int>& index, const NodeFile* in)
362 {
363 // get the global range of node ids
364 const std::pair<int,int> id_range(in->getGlobalIdRange());
365 const int undefined_node=id_range.first-1;
366 std::vector<int> distribution(in->MPIInfo->size+1);
367
368 // distribute the range of node ids
369 int buffer_len=Esys_MPIInfo_setDistribution(in->MPIInfo,
370 id_range.first, id_range.second, &distribution[0]);
371
372 // allocate buffers
373 int *Id_buffer=new int[buffer_len];
374 int *Tag_buffer=new int[buffer_len];
375 int *globalDegreesOfFreedom_buffer=new int[buffer_len];
376 double *Coordinates_buffer=new double[buffer_len*numDim];
377
378 // fill Id_buffer with the undefined_node marker to check if nodes
379 // are defined
380 #pragma omp parallel for
381 for (int n=0; n<buffer_len; n++)
382 Id_buffer[n]=undefined_node;
383
384 // fill the buffer by sending portions around in a circle
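// (ring pass 1: the buffers are passed from rank to rank with
// MPI_Sendrecv_replace; buffer_rank tracks which slice of the id
// distribution the locally held buffer represents, and each rank scatters
// its own nodes whose ids fall into that slice, so after a full cycle
// every rank holds the completed buffer for its own slice)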
385 #ifdef ESYS_MPI
386 MPI_Status status;
387 int dest=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank+1);
388 int source=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank-1);
389 #endif
390 int buffer_rank=in->MPIInfo->rank;
391 for (int p=0; p<in->MPIInfo->size; ++p) {
392 if (p>0) { // the initial send can be skipped
393 #ifdef ESYS_MPI
394 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT, dest,
395 in->MPIInfo->msg_tag_counter, source,
396 in->MPIInfo->msg_tag_counter, in->MPIInfo->comm, &status);
397 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
398 in->MPIInfo->msg_tag_counter+1, source,
399 in->MPIInfo->msg_tag_counter+1, in->MPIInfo->comm, &status);
400 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
401 MPI_INT, dest, in->MPIInfo->msg_tag_counter+2, source,
402 in->MPIInfo->msg_tag_counter+2, in->MPIInfo->comm, &status);
403 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
404 MPI_DOUBLE, dest, in->MPIInfo->msg_tag_counter+3, source,
405 in->MPIInfo->msg_tag_counter+3, in->MPIInfo->comm, &status);
406 #endif
407 ESYS_MPI_INC_COUNTER(*(in->MPIInfo), 4)
408 }
409 buffer_rank=Esys_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
410 scatterEntries(in->numNodes, in->Id, distribution[buffer_rank],
411 distribution[buffer_rank+1], Id_buffer, in->Id,
412 Tag_buffer, in->Tag, globalDegreesOfFreedom_buffer,
413 in->globalDegreesOfFreedom, numDim, Coordinates_buffer,
414 in->Coordinates);
415 }
416 // now entries are collected from the buffer again by sending the
417 // entries around in a circle
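// (ring pass 2: the completed buffers circulate once more; at each step
// the requested ids in 'index' that fall into the currently held slice
// are copied into this NodeFile)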
418 #ifdef ESYS_MPI
419 dest=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank+1);
420 source=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank-1);
421 #endif
422 buffer_rank=in->MPIInfo->rank;
423 for (int p=0; p<in->MPIInfo->size; ++p) {
424 gatherEntries(numNodes, &index[0], distribution[buffer_rank],
425 distribution[buffer_rank+1], Id, Id_buffer, Tag, Tag_buffer,
426 globalDegreesOfFreedom, globalDegreesOfFreedom_buffer, numDim,
427 Coordinates, Coordinates_buffer);
428 if (p < in->MPIInfo->size-1) { // the last send can be skipped
429 #ifdef ESYS_MPI
430 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT, dest,
431 in->MPIInfo->msg_tag_counter, source,
432 in->MPIInfo->msg_tag_counter, in->MPIInfo->comm, &status);
433 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
434 in->MPIInfo->msg_tag_counter+1, source,
435 in->MPIInfo->msg_tag_counter+1, in->MPIInfo->comm, &status);
436 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
437 MPI_INT, dest, in->MPIInfo->msg_tag_counter+2, source,
438 in->MPIInfo->msg_tag_counter+2, in->MPIInfo->comm, &status);
439 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
440 MPI_DOUBLE, dest, in->MPIInfo->msg_tag_counter+3, source,
441 in->MPIInfo->msg_tag_counter+3, in->MPIInfo->comm, &status);
442 #endif
443 ESYS_MPI_INC_COUNTER(*(in->MPIInfo), 4)
444 }
445 buffer_rank=Esys_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
446 }
447 // check if all nodes are set:
448 #pragma omp parallel for
449 for (int n=0; n<numNodes; ++n) {
450 if (Id[n] == undefined_node) {
451 std::stringstream ss;
452 ss << "NodeFile::gather_global: Node id " << Id[n]
453 << " at position " << n << " is referenced but not defined.";
454 const std::string errorMsg(ss.str());
455 setError(VALUE_ERROR, errorMsg.c_str());
456 }
457 }
458 delete[] Id_buffer;
459 delete[] Tag_buffer;
460 delete[] globalDegreesOfFreedom_buffer;
461 delete[] Coordinates_buffer;
462 // make sure that the error is global
463 Esys_MPIInfo_noError(in->MPIInfo);
464 }
465
466 void NodeFile::assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF,
467 const std::vector<int>& distribution)
468 {
469 Esys_MPI_rank p_min=MPIInfo->size, p_max=-1;
470 // first we retrieve the min and max DOF on this processor to reduce
471 // costs for searching
472 const std::pair<int,int> dof_range(getDOFRange());
473
474 for (int p=0; p<MPIInfo->size; ++p) {
475 if (distribution[p]<=dof_range.first) p_min=p;
476 if (distribution[p]<=dof_range.second) p_max=p;
477 }
478 #pragma omp parallel for
479 for (int n=0; n<numNodes; ++n) {
480 const int k=globalDegreesOfFreedom[n];
481 for (int p=p_min; p<=p_max; ++p) {
482 if (k < distribution[p+1]) {
483 mpiRankOfDOF[n]=p;
484 break;
485 }
486 }
487 }
488 }
489
490 int NodeFile::prepareLabeling(const std::vector<short>& mask,
491 std::vector<int>& buffer,
492 std::vector<int>& distribution, bool useNodes)
493 {
494 const int UNSET_ID=-1,SET_ID=1;
495
496 // get the global range of DOF/node ids
497 std::pair<int,int> idRange(useNodes ?
498 getGlobalNodeIDIndexRange() : getGlobalDOFRange());
499 const int* indexArray = (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
500 // distribute the range of node ids
501 distribution.assign(MPIInfo->size+1, 0);
502 int buffer_len=Esys_MPIInfo_setDistribution(MPIInfo, idRange.first,
503 idRange.second, &distribution[0]);
504 const int myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
505
506 // fill buffer with the UNSET_ID marker to check if entries are defined
507 buffer.assign(buffer_len, UNSET_ID);
508
509 // fill the buffer by sending portions around in a circle
510 #ifdef ESYS_MPI
511 MPI_Status status;
512 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
513 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
514 #endif
515 int buffer_rank=MPIInfo->rank;
516 for (int p=0; p<MPIInfo->size; ++p) {
517 if (p>0) { // the initial send can be skipped
518 #ifdef ESYS_MPI
519 MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_INT, dest,
520 MPIInfo->msg_tag_counter, source, MPIInfo->msg_tag_counter,
521 MPIInfo->comm, &status);
522 #endif
523 MPIInfo->msg_tag_counter++;
524 }
525 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
526 const int id0=distribution[buffer_rank];
527 const int id1=distribution[buffer_rank+1];
528 #pragma omp parallel for
529 for (int n=0; n<numNodes; n++) {
530 if (mask.size()<numNodes || mask[n]>-1) {
531 const int k=indexArray[n];
532 if (id0<=k && k<id1) {
533 buffer[k-id0] = SET_ID;
534 }
535 }
536 }
537 }
538 // count the entries in the buffer
539 // TODO: OMP parallel
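// (entries marked SET_ID are relabelled with consecutive local indices
// 0..myNewCount-1; the return value is the number of DOFs/nodes owned by
// this rank after relabelling)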
540 int myNewCount=0;
541 for (int n=0; n<myCount; ++n) {
542 if (buffer[n] == SET_ID) {
543 buffer[n]=myNewCount;
544 myNewCount++;
545 }
546 }
547 return myNewCount;
548 }
549
550 int NodeFile::createDenseDOFLabeling()
551 {
552 std::vector<int> DOF_buffer;
553 std::vector<int> distribution;
554 std::vector<int> loc_offsets(MPIInfo->size);
555 std::vector<int> offsets(MPIInfo->size);
556 int new_numGlobalDOFs=0;
557
558 // retrieve the number of locally owned DOFs and fill the buffer
559 loc_offsets[MPIInfo->rank]=prepareLabeling(std::vector<short>(),
560 DOF_buffer, distribution, false);
561 #ifdef ESYS_MPI
562 MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_INT,
563 MPI_SUM, MPIInfo->comm);
564 for (int n=0; n<MPIInfo->size; ++n) {
565 loc_offsets[n]=new_numGlobalDOFs;
566 new_numGlobalDOFs+=offsets[n];
567 }
568 #else
569 new_numGlobalDOFs=loc_offsets[0];
570 loc_offsets[0]=0;
571 #endif
572
573 const int myDOFs=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
574 #pragma omp parallel for
575 for (int n=0; n<myDOFs; ++n)
576 DOF_buffer[n]+=loc_offsets[MPIInfo->rank];
577
578 std::vector<unsigned char> set_new_DOF(numNodes, true);
579
580 // now entries are collected from the buffer again by sending them around
581 // in a circle
582 #ifdef ESYS_MPI
583 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
584 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
585 #endif
586 int buffer_rank=MPIInfo->rank;
587 for (int p=0; p<MPIInfo->size; ++p) {
588 const int dof0=distribution[buffer_rank];
589 const int dof1=distribution[buffer_rank+1];
590 #pragma omp parallel for
591 for (int n=0; n<numNodes; n++) {
592 const int k=globalDegreesOfFreedom[n];
593 if (set_new_DOF[n] && dof0<=k && k<dof1) {
594 globalDegreesOfFreedom[n]=DOF_buffer[k-dof0];
595 set_new_DOF[n]=false;
596 }
597 }
598 if (p<MPIInfo->size-1) { // the last send can be skipped
599 #ifdef ESYS_MPI
600 MPI_Status status;
601 MPI_Sendrecv_replace(&DOF_buffer[0], DOF_buffer.size(), MPI_INT,
602 dest, MPIInfo->msg_tag_counter, source,
603 MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
604 #endif
605 ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
606 }
607 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
608 }
609
610 return new_numGlobalDOFs;
611 }
612
613 int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
614 const std::vector<int>& dofDistribution)
615 {
616 const int UNSET_ID=-1, SET_ID=1;
617 const int myFirstDOF=dofDistribution[MPIInfo->rank];
618 const int myLastDOF=dofDistribution[MPIInfo->rank+1];
619
620 // find the range of node ids controlled by me
621 int min_id=std::numeric_limits<int>::max();
622 int max_id=std::numeric_limits<int>::min();
623 #pragma omp parallel
624 {
625 int loc_max_id=max_id;
626 int loc_min_id=min_id;
627 #pragma omp for
628 for (int n=0; n<numNodes; n++) {
629 const int dof=globalDegreesOfFreedom[n];
630 if (myFirstDOF<=dof && dof<myLastDOF) {
631 loc_max_id=std::max(loc_max_id, Id[n]);
632 loc_min_id=std::min(loc_min_id, Id[n]);
633 }
634 }
635 #pragma omp critical
636 {
637 max_id=std::max(loc_max_id, max_id);
638 min_id=std::min(loc_min_id, min_id);
639 }
640 }
641 int my_buffer_len = (max_id>=min_id ? max_id-min_id+1 : 0);
642 int buffer_len;
643
644 #ifdef ESYS_MPI
645 MPI_Allreduce(&my_buffer_len, &buffer_len, 1, MPI_INT, MPI_MAX,
646 MPIInfo->comm);
647 #else
648 buffer_len=my_buffer_len;
649 #endif
650
651 const int header_len=2;
652 std::vector<int> Node_buffer(buffer_len+header_len, UNSET_ID);
653 // the two header entries store this rank's node id range
654 Node_buffer[0]=min_id;
655 Node_buffer[1]=max_id;
656
657 // mark and count the nodes in use
658 #pragma omp parallel for
659 for (int n=0; n<numNodes; n++) {
660 globalNodesIndex[n]=-1;
661 const int dof=globalDegreesOfFreedom[n];
662 if (myFirstDOF<=dof && dof<myLastDOF)
663 Node_buffer[Id[n]-min_id+header_len]=SET_ID;
664 }
665 int myNewNumNodes=0;
666 for (int n=0; n<my_buffer_len; n++) {
667 if (Node_buffer[header_len+n]==SET_ID) {
668 Node_buffer[header_len+n]=myNewNumNodes;
669 myNewNumNodes++;
670 }
671 }
672 // make the local number of nodes globally available
673 #ifdef ESYS_MPI
674 MPI_Allgather(&myNewNumNodes, 1, MPI_INT, &nodeDistribution[0], 1, MPI_INT,
675 MPIInfo->comm);
676 #else
677 nodeDistribution[0]=myNewNumNodes;
678 #endif
679
680 int globalNumNodes=0;
681 for (int p=0; p<MPIInfo->size; ++p) {
682 const int itmp=nodeDistribution[p];
683 nodeDistribution[p]=globalNumNodes;
684 globalNumNodes+=itmp;
685 }
686 nodeDistribution[MPIInfo->size]=globalNumNodes;
687
688 // offset node buffer
689 #pragma omp parallel for
690 for (int n=0; n<my_buffer_len; n++)
691 Node_buffer[n+header_len]+=nodeDistribution[MPIInfo->rank];
692
693 // now we send this buffer around to assign global node index
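// (the two header entries travel with Node_buffer, so each receiver knows
// the id range covered by the buffer it currently holds and can look up
// its own Ids in it)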
694 #ifdef ESYS_MPI
695 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
696 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
697 #endif
698 int buffer_rank=MPIInfo->rank;
699 for (int p=0; p<MPIInfo->size; ++p) {
700 const int nodeID_0=Node_buffer[0];
701 const int nodeID_1=Node_buffer[1];
702 const int dof0=dofDistribution[buffer_rank];
703 const int dof1=dofDistribution[buffer_rank+1];
704 if (nodeID_0 <= nodeID_1) {
705 #pragma omp parallel for
706 for (int n=0; n<numNodes; n++) {
707 const int dof=globalDegreesOfFreedom[n];
708 const int id=Id[n]-nodeID_0;
709 if (dof0<=dof && dof<dof1 && id>=0 && id<=nodeID_1-nodeID_0)
710 globalNodesIndex[n]=Node_buffer[id+header_len];
711 }
712 }
713 if (p<MPIInfo->size-1) { // the last send can be skipped
714 #ifdef ESYS_MPI
715 MPI_Status status;
716 MPI_Sendrecv_replace(&Node_buffer[0], Node_buffer.size(), MPI_INT,
717 dest, MPIInfo->msg_tag_counter, source,
718 MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
719 #endif
720 ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
721 }
722 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
723 }
724 return globalNumNodes;
725 }
726
727 int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
728 bool useNodes)
729 {
730 std::vector<int> buffer;
731 std::vector<int> distribution;
732 std::vector<int> loc_offsets(MPIInfo->size);
733 std::vector<int> offsets(MPIInfo->size);
734 int new_numGlobalReduced=0;
735
736 // retrieve the number of locally owned DOFs/nodes and fill the buffer
737 loc_offsets[MPIInfo->rank]=prepareLabeling(reducedMask, buffer,
738 distribution, useNodes);
739 #ifdef ESYS_MPI
740 MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_INT,
741 MPI_SUM, MPIInfo->comm);
742 for (int n=0; n<MPIInfo->size; ++n) {
743 loc_offsets[n]=new_numGlobalReduced;
744 new_numGlobalReduced+=offsets[n];
745 }
746 #else
747 new_numGlobalReduced=loc_offsets[0];
748 loc_offsets[0]=0;
749 #endif
750
751 const int myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
752 #pragma omp parallel for
753 for (int n=0; n<myCount; ++n)
754 buffer[n]+=loc_offsets[MPIInfo->rank];
755
756 const int* denseArray =
757 (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
758 int* reducedArray =
759 (useNodes ? globalReducedNodesIndex : globalReducedDOFIndex);
760
761 #pragma omp parallel for
762 for (int n=0; n<numNodes; ++n)
763 reducedArray[n]=loc_offsets[0]-1;
764
765 // now entries are collected from the buffer by sending them around
766 // in a circle
767 #ifdef ESYS_MPI
768 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
769 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
770 #endif
771 int buffer_rank=MPIInfo->rank;
772 for (int p=0; p<MPIInfo->size; ++p) {
773 const int id0=distribution[buffer_rank];
774 const int id1=distribution[buffer_rank+1];
775 #pragma omp parallel for
776 for (int n=0; n<numNodes; n++) {
777 if (reducedMask[n] > -1) {
778 const int k=denseArray[n];
779 if (id0<=k && k<id1)
780 reducedArray[n]=buffer[k-id0];
781 }
782 }
783 if (p<MPIInfo->size-1) { // the last send can be skipped
784 #ifdef ESYS_MPI
785 MPI_Status status;
786 MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_INT, dest,
787 MPIInfo->msg_tag_counter, source,
788 MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
789 #endif
790 ESYS_MPI_INC_COUNTER(*MPIInfo, 1)
791 }
792 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
793 }
794 return new_numGlobalReduced;
795 }
796
797 void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
798 {
799 Paso_Distribution* dof_distribution;
800 const int* globalDOFIndex;
801 if (use_reduced_elements) {
802 dof_distribution=reducedDegreesOfFreedomDistribution;
803 globalDOFIndex=globalReducedDOFIndex;
804 } else {
805 dof_distribution=degreesOfFreedomDistribution;
806 globalDOFIndex=globalDegreesOfFreedom;
807 }
808 const int myFirstDOF=Paso_Distribution_getFirstComponent(dof_distribution);
809 const int myLastDOF=Paso_Distribution_getLastComponent(dof_distribution);
810 const int mpiSize=MPIInfo->size;
811 const int myRank=MPIInfo->rank;
812
813 int min_DOF, max_DOF;
814 std::pair<int,int> DOF_range(util::getFlaggedMinMaxInt(
815 numNodes, globalDOFIndex, -1));
816
817 if (DOF_range.second < DOF_range.first) {
818 min_DOF=myFirstDOF;
819 max_DOF=myLastDOF-1;
820 } else {
821 min_DOF=DOF_range.first;
822 max_DOF=DOF_range.second;
823 }
824
825 int p_min=mpiSize;
826 int p_max=-1;
827 if (max_DOF >= min_DOF) {
828 for (int p=0; p<mpiSize; ++p) {
829 if (dof_distribution->first_component[p]<=min_DOF) p_min=p;
830 if (dof_distribution->first_component[p]<=max_DOF) p_max=p;
831 }
832 }
833
834 if (!((min_DOF<=myFirstDOF) && (myLastDOF-1<=max_DOF))) {
835 setError(SYSTEM_ERROR, "Local elements do not span local degrees of freedom.");
836 return;
837 }
838 const int UNUSED = -1;
839 const int len_loc_dof=max_DOF-min_DOF+1;
840 std::vector<int> shared(numNodes*(p_max-p_min+1));
841 std::vector<int> offsetInShared(mpiSize+1);
842 std::vector<int> locDOFMask(len_loc_dof, UNUSED);
843
844 #pragma omp parallel
845 {
846 #pragma omp for
847 for (int i=0;i<numNodes;++i) {
848 const int k=globalDOFIndex[i];
849 if (k > -1) {
850 #ifdef BOUNDS_CHECK
851 if ((k-min_DOF)>=len_loc_dof) {
852 printf("BOUNDS_CHECK %s %d i=%d k=%d min_DOF=%d\n", __FILE__, __LINE__, i, k, min_DOF);
853 exit(1);
854 }
855 #endif
856 locDOFMask[k-min_DOF]=UNUSED-1;
857 }
858 }
859 #ifdef BOUNDS_CHECK
860 if (myLastDOF-min_DOF > len_loc_dof) {
861 printf("BOUNDS_CHECK %s %d\n", __FILE__, __LINE__);
862 exit(1);
863 }
864 #endif
865 #pragma omp for
866 for (int i=myFirstDOF-min_DOF; i<myLastDOF-min_DOF; ++i) {
867 locDOFMask[i]=i-myFirstDOF+min_DOF;
868 }
869 }
870
871 std::vector<int> wanted_DOFs(numNodes);
872 std::vector<int> rcv_len(mpiSize);
873 std::vector<int> snd_len(mpiSize);
874 std::vector<int> neighbor(mpiSize);
875 int numNeighbors=0;
876 int n=0;
877 int lastn=n;
878 for (int p=p_min; p<=p_max; ++p) {
879 if (p != myRank) {
880 const int firstDOF=std::max(min_DOF, dof_distribution->first_component[p]);
881 const int lastDOF=std::min(max_DOF+1, dof_distribution->first_component[p+1]);
882 #ifdef BOUNDS_CHECK
883 if (firstDOF-min_DOF<0 || lastDOF-min_DOF>len_loc_dof) {
884 printf("BOUNDS_CHECK %s %d p=%d\n", __FILE__, __LINE__, p);
885 exit(1);
886 }
887 #endif
888 for (int i=firstDOF-min_DOF; i<lastDOF-min_DOF; ++i) {
889 if (locDOFMask[i] == UNUSED-1) {
890 locDOFMask[i]=myLastDOF-myFirstDOF+n;
891 wanted_DOFs[n]=i+min_DOF;
892 ++n;
893 }
894 }
895 if (n > lastn) {
896 rcv_len[p]=n-lastn;
897 #ifdef BOUNDS_CHECK
898 if (numNeighbors >= mpiSize+1) {
899 printf("BOUNDS_CHECK %s %d p=%d numNeighbors=%d n=%d\n", __FILE__, __LINE__, p, numNeighbors, n);
900 exit(1);
901 }
902 #endif
903 neighbor[numNeighbors]=p;
904 offsetInShared[numNeighbors]=lastn;
905 numNeighbors++;
906 lastn=n;
907 }
908 } // if p!=myRank
909 } // for p
910
911 #ifdef BOUNDS_CHECK
912 if (numNeighbors >= mpiSize+1) {
913 printf("BOUNDS_CHECK %s %d numNeighbors=%d\n", __FILE__, __LINE__, numNeighbors);
914 exit(1);
915 }
916 #endif
917 offsetInShared[numNeighbors]=lastn;
918
919 // assign new DOF labels to nodes
920 std::vector<int> nodeMask(numNodes, UNUSED);
921 #pragma omp parallel for
922 for (int i=0; i<numNodes; ++i) {
923 const int k=globalDOFIndex[i];
924 if (k > -1)
925 nodeMask[i]=locDOFMask[k-min_DOF];
926 }
927
928 // now we can set the mapping from nodes to local DOFs
929 if (use_reduced_elements) {
930 reducedDegreesOfFreedomMapping.assign(nodeMask, UNUSED);
931 } else {
932 degreesOfFreedomMapping.assign(nodeMask, UNUSED);
933 }
934
935 // define how to get DOF values that are needed locally but controlled by other processors
936 #ifdef BOUNDS_CHECK
937 if (offsetInShared[numNeighbors] >= numNodes*(p_max-p_min+1)) {
938 printf("BOUNDS_CHECK %s %d\n", __FILE__, __LINE__);
939 exit(1);
940 }
941 #endif
942 #pragma omp parallel for
943 for (int i=0; i<offsetInShared[numNeighbors]; ++i)
944 shared[i]=myLastDOF-myFirstDOF+i;
945
946 Paso_SharedComponents *rcv_shcomp=Paso_SharedComponents_alloc(
947 myLastDOF-myFirstDOF, numNeighbors, &neighbor[0], &shared[0],
948 &offsetInShared[0], 1, 0, MPIInfo);
949
950 /////////////////////////////////
951 // now we build the sender //
952 /////////////////////////////////
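// (the request lengths are exchanged with MPI_Alltoall; the lists of
// requested global DOFs are then exchanged with non-blocking
// MPI_Isend/MPI_Irecv and translated to local indices below)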
953 #ifdef ESYS_MPI
954 std::vector<MPI_Request> mpi_requests(mpiSize*2);
955 std::vector<MPI_Status> mpi_stati(mpiSize*2);
956 MPI_Alltoall(&rcv_len[0], 1, MPI_INT, &snd_len[0], 1, MPI_INT, MPIInfo->comm);
957 int count=0;
958 #else
959 snd_len[0]=rcv_len[0];
960 #endif
961
962 for (int p=0; p<rcv_shcomp->numNeighbors; p++) {
963 #ifdef ESYS_MPI
964 MPI_Isend(&(wanted_DOFs[rcv_shcomp->offsetInShared[p]]),
965 rcv_shcomp->offsetInShared[p+1]-rcv_shcomp->offsetInShared[p],
966 MPI_INT, rcv_shcomp->neighbor[p],
967 MPIInfo->msg_tag_counter+myRank, MPIInfo->comm,
968 &mpi_requests[count]);
969 count++;
970 #endif
971 }
972 n=0;
973 numNeighbors=0;
974 for (int p=0; p<mpiSize; p++) {
975 if (snd_len[p] > 0) {
976 #ifdef ESYS_MPI
977 MPI_Irecv(&shared[n], snd_len[p], MPI_INT, p,
978 MPIInfo->msg_tag_counter+p, MPIInfo->comm,
979 &mpi_requests[count]);
980 count++;
981 #endif
982 neighbor[numNeighbors]=p;
983 offsetInShared[numNeighbors]=n;
984 numNeighbors++;
985 n+=snd_len[p];
986 }
987 }
988 ESYS_MPI_INC_COUNTER(*MPIInfo, MPIInfo->size)
989 offsetInShared[numNeighbors]=n;
990 #ifdef ESYS_MPI
991 MPI_Waitall(count, &mpi_requests[0], &mpi_stati[0]);
992 #endif
993 // map global ids to local ids
994 #pragma omp parallel for
995 for (int i=0; i<offsetInShared[numNeighbors]; ++i) {
996 shared[i]=locDOFMask[shared[i]-min_DOF];
997 }
998
999 Paso_SharedComponents* snd_shcomp=Paso_SharedComponents_alloc(
1000 myLastDOF-myFirstDOF, numNeighbors, &neighbor[0], &shared[0],
1001 &offsetInShared[0], 1, 0, MPIInfo);
1002
1003 if (noError()) {
1004 if (use_reduced_elements) {
1005 reducedDegreesOfFreedomConnector=Paso_Connector_alloc(snd_shcomp, rcv_shcomp);
1006 } else {
1007 degreesOfFreedomConnector=Paso_Connector_alloc(snd_shcomp, rcv_shcomp);
1008 }
1009 }
1010
1011 Paso_SharedComponents_free(rcv_shcomp);
1012 Paso_SharedComponents_free(snd_shcomp);
1013 }
1014
1015 void NodeFile::createNodeMappings(const std::vector<int>& indexReducedNodes,
1016 const std::vector<int>& dofDist,
1017 const std::vector<int>& nodeDist)
1018 {
1019 const int mpiSize=MPIInfo->size;
1020 const int myRank=MPIInfo->rank;
1021
1022 const int myFirstDOF=dofDist[myRank];
1023 const int myLastDOF=dofDist[myRank+1];
1024 const int myNumDOF=myLastDOF-myFirstDOF;
1025
1026 const int myFirstNode=nodeDist[myRank];
1027 const int myLastNode=nodeDist[myRank+1];
1028 const int myNumNodes=myLastNode-myFirstNode;
1029
1030 std::vector<short> maskMyReducedDOF(myNumDOF, -1);
1031 std::vector<short> maskMyReducedNodes(myNumNodes, -1);
1032
1033 // mark the nodes used by the reduced mesh
1034 #pragma omp parallel for
1035 for (int i=0; i<indexReducedNodes.size(); ++i) {
1036 int k=globalNodesIndex[indexReducedNodes[i]];
1037 if (k>=myFirstNode && myLastNode>k)
1038 maskMyReducedNodes[k-myFirstNode]=1;
1039 k=globalDegreesOfFreedom[indexReducedNodes[i]];
1040 if (k>=myFirstDOF && myLastDOF>k) {
1041 maskMyReducedDOF[k-myFirstDOF]=1;
1042 }
1043 }
1044 std::vector<int> indexMyReducedDOF = util::packMask(maskMyReducedDOF);
1045 int myNumReducedDOF=indexMyReducedDOF.size();
1046 std::vector<int> indexMyReducedNodes = util::packMask(maskMyReducedNodes);
1047 int myNumReducedNodes=indexMyReducedNodes.size();
1048
1049 std::vector<int> rdofDist(mpiSize+1);
1050 std::vector<int> rnodeDist(mpiSize+1);
1051 #ifdef ESYS_MPI
1052 MPI_Allgather(&myNumReducedNodes, 1, MPI_INT, &rnodeDist[0], 1, MPI_INT, MPIInfo->comm);
1053 MPI_Allgather(&myNumReducedDOF, 1, MPI_INT, &rdofDist[0], 1, MPI_INT, MPIInfo->comm);
1054 #else
1055 rnodeDist[0]=myNumReducedNodes;
1056 rdofDist[0]=myNumReducedDOF;
1057 #endif
1058 int globalNumReducedNodes=0;
1059 int globalNumReducedDOF=0;
1060 for (int i=0; i<mpiSize;++i) {
1061 int k=rnodeDist[i];
1062 rnodeDist[i]=globalNumReducedNodes;
1063 globalNumReducedNodes+=k;
1064
1065 k=rdofDist[i];
1066 rdofDist[i]=globalNumReducedDOF;
1067 globalNumReducedDOF+=k;
1068 }
1069 rnodeDist[mpiSize]=globalNumReducedNodes;
1070 rdofDist[mpiSize]=globalNumReducedDOF;
1071
1072 // ==== distribution of Nodes ===============================
1073 nodesDistribution=Paso_Distribution_alloc(MPIInfo, &nodeDist[0], 1, 0);
1074 // ==== distribution of DOFs ================================
1075 degreesOfFreedomDistribution=Paso_Distribution_alloc(MPIInfo, &dofDist[0], 1,0);
1076 // ==== distribution of reduced Nodes =======================
1077 reducedNodesDistribution=Paso_Distribution_alloc(MPIInfo, &rnodeDist[0], 1, 0);
1078 // ==== distribution of reduced DOF =========================
1079 reducedDegreesOfFreedomDistribution=Paso_Distribution_alloc(MPIInfo, &rdofDist[0], 1, 0);
1080
1081 std::vector<int> nodeMask(numNodes);
1082
1083 if (noError()) {
1084 const int UNUSED = -1;
1085 // ==== nodes mapping which is a dummy structure ========
1086 #pragma omp parallel for
1087 for (int i=0; i<numNodes; ++i)
1088 nodeMask[i]=i;
1089 nodesMapping.assign(nodeMask, UNUSED);
1090
1091 // ==== mapping between nodes and reduced nodes ==========
1092 #pragma omp parallel for
1093 for (int i=0; i<numNodes; ++i)
1094 nodeMask[i]=UNUSED;
1095 #pragma omp parallel for
1096 for (int i=0; i<indexReducedNodes.size(); ++i)
1097 nodeMask[indexReducedNodes[i]]=i;
1098 reducedNodesMapping.assign(nodeMask, UNUSED);
1099 }
1100 // ==== mapping between nodes and DOFs + DOF connector
1101 if (noError())
1102 createDOFMappingAndCoupling(false);
1103 // ==== mapping between nodes and reduced DOFs + reduced DOF connector
1104 if (noError())
1105 createDOFMappingAndCoupling(true);
1106
1107 // get the Ids for reduced nodes, DOFs and reduced DOFs
1108 if (noError()) {
1109 #pragma omp parallel
1110 {
1111 #pragma omp for
1112 for (int i=0; i<reducedNodesMapping.getNumTargets(); ++i)
1113 reducedNodesId[i]=Id[reducedNodesMapping.map[i]];
1114 #pragma omp for
1115 for (int i=0; i<degreesOfFreedomMapping.getNumTargets(); ++i)
1116 degreesOfFreedomId[i]=Id[degreesOfFreedomMapping.map[i]];
1117 #pragma omp for
1118 for (int i=0; i<reducedDegreesOfFreedomMapping.getNumTargets(); ++i)
1119 reducedDegreesOfFreedomId[i]=Id[reducedDegreesOfFreedomMapping.map[i]];
1120 }
1121 } else {
1122 Paso_Distribution_free(nodesDistribution);
1123 Paso_Distribution_free(reducedNodesDistribution);
1124 Paso_Distribution_free(degreesOfFreedomDistribution);
1125 Paso_Distribution_free(reducedDegreesOfFreedomDistribution);
1126 Paso_Connector_free(degreesOfFreedomConnector);
1127 Paso_Connector_free(reducedDegreesOfFreedomConnector);
1128 nodesDistribution=NULL;
1129 reducedNodesDistribution=NULL;
1130 degreesOfFreedomDistribution=NULL;
1131 reducedDegreesOfFreedomDistribution=NULL;
1132 degreesOfFreedomConnector=NULL;
1133 reducedDegreesOfFreedomConnector=NULL;
1134 }
1135 }
1136
1137 } // namespace finley
1138

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision
svn:mergeinfo /branches/lapack2681/finley/src/NodeFile.cpp:2682-2741 /branches/pasowrap/finley/src/NodeFile.cpp:3661-3674 /branches/py3_attempt2/finley/src/NodeFile.cpp:3871-3891 /branches/restext/finley/src/NodeFile.cpp:2610-2624 /branches/ripleygmg_from_3668/finley/src/NodeFile.cpp:3669-3791 /branches/stage3.0/finley/src/NodeFile.cpp:2569-2590 /branches/symbolic_from_3470/finley/src/NodeFile.cpp:3471-3974 /release/3.0/finley/src/NodeFile.cpp:2591-2601 /trunk/finley/src/NodeFile.cpp:4257-4344
