
Contents of /trunk/finley/src/NodeFile.cpp

Revision 4496
Mon Jul 15 06:53:44 2013 UTC by caltinay
File size: 40858 byte(s)
finley (WIP):
-moved all of finley into its namespace
-introduced some shared pointers
-Mesh is now a class
-other bits and pieces...

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2013 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15
16
17 /****************************************************************************
18
19 Finley: NodeFile
20
21 *****************************************************************************/
22
23 #include "NodeFile.h"
24 #include <escript/Data.h>
25 #include <paso/Coupler.h>
26
27 #include <limits>
28 #include <sstream>
29
30 namespace finley {
31
32 // helper function
33 static void scatterEntries(int n, int* index, int min_index, int max_index,
34 int* Id_out, int* Id_in, int* Tag_out, int* Tag_in,
35 int* globalDegreesOfFreedom_out,
36 int* globalDegreesOfFreedom_in,
37 int numDim, double* Coordinates_out,
38 double* Coordinates_in)
39 {
40 const int range = max_index-min_index;
41 const size_t numDim_size = numDim*sizeof(double);
42
43 #pragma omp parallel for
44 for (int i=0; i<n; i++) {
45 const int k=index[i]-min_index;
46 if ((k>=0) && (k<range)) {
47 Id_out[k]=Id_in[i];
48 Tag_out[k]=Tag_in[i];
49 globalDegreesOfFreedom_out[k]=globalDegreesOfFreedom_in[i];
50 memcpy(&(Coordinates_out[INDEX2(0,k,numDim)]),
51 &(Coordinates_in[INDEX2(0,i,numDim)]), numDim_size);
52 }
53 }
54 }
55
56 // helper function
57 static void gatherEntries(int n, const int* index, int min_index, int max_index,
58 int* Id_out, int* Id_in, int* Tag_out, int* Tag_in,
59 int* globalDegreesOfFreedom_out,
60 int* globalDegreesOfFreedom_in,
61 int numDim, double* Coordinates_out,
62 double* Coordinates_in)
63 {
64 const int range = max_index-min_index;
65 const size_t numDim_size = numDim*sizeof(double);
66
67 #pragma omp parallel for
68 for (int i=0; i<n; i++) {
69 const int k=index[i]-min_index;
70 if ((k>=0) && (k<range)) {
71 Id_out[i]=Id_in[k];
72 Tag_out[i]=Tag_in[k];
73 globalDegreesOfFreedom_out[i]=globalDegreesOfFreedom_in[k];
74 memcpy(&(Coordinates_out[INDEX2(0,i,numDim)]),
75 &(Coordinates_in[INDEX2(0,k,numDim)]), numDim_size);
76 }
77 }
78 }
79
80 /// constructor
81 /// use NodeFile::allocTable to allocate the node table (Id,Coordinates)
82 NodeFile::NodeFile(int nDim, Esys_MPIInfo *mpiInfo) :
83 numNodes(0),
84 numDim(nDim),
85 Id(NULL),
86 Tag(NULL),
87 globalDegreesOfFreedom(NULL),
88 Coordinates(NULL),
89 globalReducedDOFIndex(NULL),
90 globalReducedNodesIndex(NULL),
91 globalNodesIndex(NULL),
92 nodesDistribution(NULL),
93 reducedNodesDistribution(NULL),
94 degreesOfFreedomDistribution(NULL),
95 reducedDegreesOfFreedomDistribution(NULL),
96 degreesOfFreedomConnector(NULL),
97 reducedDegreesOfFreedomConnector(NULL),
98 reducedNodesId(NULL),
99 degreesOfFreedomId(NULL),
100 reducedDegreesOfFreedomId(NULL),
101 status(FINLEY_INITIAL_STATUS)
102 {
103 MPIInfo = Esys_MPIInfo_getReference(mpiInfo);
104 }
105
106 /// destructor
107 NodeFile::~NodeFile()
108 {
109 freeTable();
110 Esys_MPIInfo_free(MPIInfo);
111 }
112
113 /// allocates the node table within this node file to hold NN nodes.
114 void NodeFile::allocTable(int NN)
115 {
116 if (numNodes>0)
117 freeTable();
118
119 Id=new int[NN];
120 Coordinates=new double[NN*numDim];
121 Tag=new int[NN];
122 globalDegreesOfFreedom=new int[NN];
123 globalReducedDOFIndex=new int[NN];
124 globalReducedNodesIndex=new int[NN];
125 globalNodesIndex=new int[NN];
126 reducedNodesId=new int[NN];
127 degreesOfFreedomId=new int[NN];
128 reducedDegreesOfFreedomId=new int[NN];
129 numNodes=NN;
130
131 // this initialization makes sure that data are located on the right
132 // processor
133 #pragma omp parallel for
134 for (int n=0; n<numNodes; n++) {
135 Id[n]=-1;
136 for (int i=0; i<numDim; i++)
137 Coordinates[INDEX2(i,n,numDim)]=0.;
138 Tag[n]=-1;
139 globalDegreesOfFreedom[n]=-1;
140 globalReducedDOFIndex[n]=-1;
141 globalReducedNodesIndex[n]=-1;
142 globalNodesIndex[n]=-1;
143 reducedNodesId[n]=-1;
144 degreesOfFreedomId[n]=-1;
145 reducedDegreesOfFreedomId[n]=-1;
146 }
147 }
148
149 /// frees the node table within this node file
150 void NodeFile::freeTable()
151 {
152 delete[] Id;
153 delete[] Coordinates;
154 delete[] globalDegreesOfFreedom;
155 delete[] globalReducedDOFIndex;
156 delete[] globalReducedNodesIndex;
157 delete[] globalNodesIndex;
158 delete[] Tag;
159 delete[] reducedNodesId;
160 delete[] degreesOfFreedomId;
161 delete[] reducedDegreesOfFreedomId;
162 tagsInUse.clear();
163 nodesMapping.clear();
164 reducedNodesMapping.clear();
165 degreesOfFreedomMapping.clear();
166 reducedDegreesOfFreedomMapping.clear();
167 Paso_Distribution_free(nodesDistribution);
168 nodesDistribution=NULL;
169 Paso_Distribution_free(reducedNodesDistribution);
170 reducedNodesDistribution=NULL;
171 Paso_Distribution_free(degreesOfFreedomDistribution);
172 degreesOfFreedomDistribution=NULL;
173 Paso_Distribution_free(reducedDegreesOfFreedomDistribution);
174 reducedDegreesOfFreedomDistribution=NULL;
175 Paso_Connector_free(degreesOfFreedomConnector);
176 degreesOfFreedomConnector=NULL;
177 Paso_Connector_free(reducedDegreesOfFreedomConnector);
178 reducedDegreesOfFreedomConnector=NULL;
179
180 numNodes=0;
181 }
182
183 void NodeFile::print() const
184 {
185 std::cout << "=== " << numDim << "D-Nodes:\nnumber of nodes=" << numNodes
186 << std::endl;
187 std::cout << "Id,Tag,globalDegreesOfFreedom,degreesOfFreedom,reducedDegreesOfFreedom,node,reducedNode,Coordinates" << std::endl;
188 for (int i=0; i<numNodes; i++) {
189 std::cout << Id[i] << "," << Tag[i] << "," << globalDegreesOfFreedom[i]
190 << "," << degreesOfFreedomMapping.target[i]
191 << "," << reducedDegreesOfFreedomMapping.target[i]
192 << "," << nodesMapping.target[i] << reducedNodesMapping.target[i]
193 << " ";
194 std::cout.precision(15);
195 std::cout.setf(std::ios::scientific, std::ios::floatfield);
196 for (int j=0; j<numDim; j++)
197 std::cout << " " << Coordinates[INDEX2(j,i,numDim)];
198 std::cout << std::endl;
199 }
200 }
201
202 /// copies the array newX into this->coordinates
203 void NodeFile::setCoordinates(const escript::Data& cNewX)
204 {
205 if (cNewX.getDataPointSize() != numDim) {
206 std::stringstream ss;
207 ss << "NodeFile::setCoordinates: number of dimensions of new "
208 "coordinates has to be " << numDim;
209 const std::string errorMsg(ss.str());
210 setError(VALUE_ERROR, errorMsg.c_str());
211 } else if (cNewX.getNumDataPointsPerSample() != 1 ||
212 cNewX.getNumSamples() != numNodes) {
213 std::stringstream ss;
214 ss << "NodeFile::setCoordinates: number of given nodes must be "
215 << numNodes;
216 const std::string errorMsg(ss.str());
217 setError(VALUE_ERROR, errorMsg.c_str());
218 } else {
219 const size_t numDim_size=numDim*sizeof(double);
220 ++status;
221 escript::Data& newX = *const_cast<escript::Data*>(&cNewX);
222 #pragma omp parallel for
223 for (int n=0; n<numNodes; n++) {
224 memcpy(&(Coordinates[INDEX2(0,n,numDim)]), newX.getSampleDataRO(n), numDim_size);
225 }
226 }
227 }
228
229 /// sets tags to newTag where mask>0
230 void NodeFile::setTags(const int newTag, const escript::Data& cMask)
231 {
232 resetError();
233
234 if (1 != cMask.getDataPointSize()) {
235 setError(TYPE_ERROR, "NodeFile::setTags: number of components of mask must be 1.");
236 return;
237 } else if (cMask.getNumDataPointsPerSample() != 1 ||
238 cMask.getNumSamples() != numNodes) {
239 setError(TYPE_ERROR, "NodeFile::setTags: illegal number of samples of mask Data object");
240 return;
241 }
242
243 escript::Data& mask = *const_cast<escript::Data*>(&cMask);
244 #pragma omp parallel for
245 for (int n=0; n<numNodes; n++) {
246 if (mask.getSampleDataRO(n)[0] > 0)
247 Tag[n]=newTag;
248 }
249 updateTagList();
250 }
251
252 std::pair<int,int> NodeFile::getDOFRange() const
253 {
254 std::pair<int,int> result(util::getMinMaxInt(
255 1, numNodes, globalDegreesOfFreedom));
256 if (result.second < result.first) {
257 result.first = -1;
258 result.second = 0;
259 }
260 return result;
261 }
262
263 std::pair<int,int> NodeFile::getGlobalIdRange() const
264 {
265 std::pair<int,int> result(util::getMinMaxInt(1, numNodes, Id));
266
267 #ifdef ESYS_MPI
268 int global_id_range[2];
269 int id_range[2] = { -result.first, result.second };
270 MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
271 result.first = -global_id_range[0];
272 result.second = global_id_range[1];
273 #endif
274 if (result.second < result.first) {
275 result.first = -1;
276 result.second = 0;
277 }
278 return result;
279 }
280
281 std::pair<int,int> NodeFile::getGlobalDOFRange() const
282 {
283 std::pair<int,int> result(util::getMinMaxInt(
284 1, numNodes, globalDegreesOfFreedom));
285
286 #ifdef ESYS_MPI
287 int global_id_range[2];
288 int id_range[2] = { -result.first, result.second };
289 MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
290 result.first = -global_id_range[0];
291 result.second = global_id_range[1];
292 #endif
293 if (result.second < result.first) {
294 result.first = -1;
295 result.second = 0;
296 }
297 return result;
298 }
299
300 std::pair<int,int> NodeFile::getGlobalNodeIDIndexRange() const
301 {
302 std::pair<int,int> result(util::getMinMaxInt(1, numNodes, globalNodesIndex));
303
304 #ifdef ESYS_MPI
305 int global_id_range[2];
306 int id_range[2] = { -result.first, result.second };
307 MPI_Allreduce(id_range, global_id_range, 2, MPI_INT, MPI_MAX, MPIInfo->comm);
308 result.first = -global_id_range[0];
309 result.second = global_id_range[1];
310 #endif
311 if (result.second < result.first) {
312 result.first = -1;
313 result.second = 0;
314 }
315 return result;
316 }
317
318 void NodeFile::copyTable(int offset, int idOffset, int dofOffset,
319 const NodeFile* in)
320 {
321 // check number of dimensions and table size
322 if (numDim != in->numDim) {
323 setError(TYPE_ERROR, "NodeFile::copyTable: dimensions of node files don't match");
324 return;
325 }
326 if (numNodes < in->numNodes+offset) {
327 setError(MEMORY_ERROR, "NodeFile::copyTable: node table is too small.");
328 return;
329 }
330
331 #pragma omp parallel for
332 for (int n=0; n<in->numNodes; n++) {
333 Id[offset+n]=in->Id[n]+idOffset;
334 Tag[offset+n]=in->Tag[n];
335 globalDegreesOfFreedom[offset+n]=in->globalDegreesOfFreedom[n]+dofOffset;
336 for(int i=0; i<numDim; i++)
337 Coordinates[INDEX2(i, offset+n, numDim)] =
338 in->Coordinates[INDEX2(i, n, in->numDim)];
339 }
340 }
341
342 /// scatters the NodeFile 'in' into this NodeFile using index[0:in->numNodes-1].
343 /// index has to be between 0 and numNodes-1.
344 /// colouring is chosen for the worst case
345 void NodeFile::scatter(int* index, const NodeFile* in)
346 {
347 scatterEntries(numNodes, index, 0, in->numNodes, Id, in->Id, Tag, in->Tag,
348 globalDegreesOfFreedom, in->globalDegreesOfFreedom,
349 numDim, Coordinates, in->Coordinates);
350 }
351
352 /// gathers this NodeFile from the NodeFile 'in' using the entries in
353 /// index[0:numNodes-1] which are between min_index and max_index
354 /// (exclusive)
355 void NodeFile::gather(int* index, const NodeFile* in)
356 {
357 const std::pair<int,int> id_range(in->getGlobalIdRange());
358 gatherEntries(numNodes, index, id_range.first, id_range.second, Id, in->Id,
359 Tag, in->Tag, globalDegreesOfFreedom, in->globalDegreesOfFreedom,
360 numDim, Coordinates, in->Coordinates);
361 }
362
363 void NodeFile::gather_global(const std::vector<int>& index, const NodeFile* in)
364 {
365 // get the global range of node ids
366 const std::pair<int,int> id_range(in->getGlobalIdRange());
367 const int undefined_node=id_range.first-1;
368 std::vector<int> distribution(in->MPIInfo->size+1);
369
370 // distribute the range of node ids
371 int buffer_len=Esys_MPIInfo_setDistribution(in->MPIInfo,
372 id_range.first, id_range.second, &distribution[0]);
373
374 // allocate buffers
375 int *Id_buffer=new int[buffer_len];
376 int *Tag_buffer=new int[buffer_len];
377 int *globalDegreesOfFreedom_buffer=new int[buffer_len];
378 double *Coordinates_buffer=new double[buffer_len*numDim];
379
380 // fill Id_buffer by the undefined_node marker to check if nodes
381 // are defined
382 #pragma omp parallel for
383 for (int n=0; n<buffer_len; n++)
384 Id_buffer[n]=undefined_node;
385
386 // fill the buffer by sending portions around in a circle
387 #ifdef ESYS_MPI
388 MPI_Status status;
389 int dest=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank+1);
390 int source=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank-1);
391 #endif
392 int buffer_rank=in->MPIInfo->rank;
393 for (int p=0; p<in->MPIInfo->size; ++p) {
394 if (p>0) { // the initial send can be skipped
395 #ifdef ESYS_MPI
396 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT, dest,
397 in->MPIInfo->msg_tag_counter, source,
398 in->MPIInfo->msg_tag_counter, in->MPIInfo->comm, &status);
399 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
400 in->MPIInfo->msg_tag_counter+1, source,
401 in->MPIInfo->msg_tag_counter+1, in->MPIInfo->comm, &status);
402 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
403 MPI_INT, dest, in->MPIInfo->msg_tag_counter+2, source,
404 in->MPIInfo->msg_tag_counter+2, in->MPIInfo->comm, &status);
405 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
406 MPI_DOUBLE, dest, in->MPIInfo->msg_tag_counter+3, source,
407 in->MPIInfo->msg_tag_counter+3, in->MPIInfo->comm, &status);
408 #endif
409 in->MPIInfo->msg_tag_counter+=4;
410 }
411 buffer_rank=Esys_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
412 scatterEntries(in->numNodes, in->Id, distribution[buffer_rank],
413 distribution[buffer_rank+1], Id_buffer, in->Id,
414 Tag_buffer, in->Tag, globalDegreesOfFreedom_buffer,
415 in->globalDegreesOfFreedom, numDim, Coordinates_buffer,
416 in->Coordinates);
417 }
418 // now entries are collected from the buffer again by sending the
419 // entries around in a circle
420 #ifdef ESYS_MPI
421 dest=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank+1);
422 source=Esys_MPIInfo_mod(in->MPIInfo->size, in->MPIInfo->rank-1);
423 #endif
424 buffer_rank=in->MPIInfo->rank;
425 for (int p=0; p<in->MPIInfo->size; ++p) {
426 gatherEntries(numNodes, &index[0], distribution[buffer_rank],
427 distribution[buffer_rank+1], Id, Id_buffer, Tag, Tag_buffer,
428 globalDegreesOfFreedom, globalDegreesOfFreedom_buffer, numDim,
429 Coordinates, Coordinates_buffer);
430 if (p < in->MPIInfo->size-1) { // the last send can be skipped
431 #ifdef ESYS_MPI
432 MPI_Sendrecv_replace(Id_buffer, buffer_len, MPI_INT, dest,
433 in->MPIInfo->msg_tag_counter, source,
434 in->MPIInfo->msg_tag_counter, in->MPIInfo->comm, &status);
435 MPI_Sendrecv_replace(Tag_buffer, buffer_len, MPI_INT, dest,
436 in->MPIInfo->msg_tag_counter+1, source,
437 in->MPIInfo->msg_tag_counter+1, in->MPIInfo->comm, &status);
438 MPI_Sendrecv_replace(globalDegreesOfFreedom_buffer, buffer_len,
439 MPI_INT, dest, in->MPIInfo->msg_tag_counter+2, source,
440 in->MPIInfo->msg_tag_counter+2, in->MPIInfo->comm, &status);
441 MPI_Sendrecv_replace(Coordinates_buffer, buffer_len*numDim,
442 MPI_DOUBLE, dest, in->MPIInfo->msg_tag_counter+3, source,
443 in->MPIInfo->msg_tag_counter+3, in->MPIInfo->comm, &status);
444 #endif
445 in->MPIInfo->msg_tag_counter+=4;
446 }
447 buffer_rank=Esys_MPIInfo_mod(in->MPIInfo->size, buffer_rank-1);
448 }
449 // check if all nodes are set:
450 #pragma omp parallel for
451 for (int n=0; n<numNodes; ++n) {
452 if (Id[n] == undefined_node) {
453 std::stringstream ss;
454 ss << "NodeFile::gather_global: Node id " << Id[n]
455 << " at position " << n << " is referenced but not defined.";
456 const std::string errorMsg(ss.str());
457 setError(VALUE_ERROR, errorMsg.c_str());
458 }
459 }
460 delete[] Id_buffer;
461 delete[] Tag_buffer;
462 delete[] globalDegreesOfFreedom_buffer;
463 delete[] Coordinates_buffer;
464 // make sure that the error is global
465 Esys_MPIInfo_noError(in->MPIInfo);
466 }
467
468 void NodeFile::assignMPIRankToDOFs(std::vector<int>& mpiRankOfDOF,
469 const std::vector<int>& distribution)
470 {
471 Esys_MPI_rank p_min=MPIInfo->size, p_max=-1;
472 // first we retrieve the min and max DOF on this processor to reduce
473 // costs for searching
474 const std::pair<int,int> dof_range(getDOFRange());
475
476 for (int p=0; p<MPIInfo->size; ++p) {
477 if (distribution[p]<=dof_range.first) p_min=p;
478 if (distribution[p]<=dof_range.second) p_max=p;
479 }
480 #pragma omp parallel for
481 for (int n=0; n<numNodes; ++n) {
482 const int k=globalDegreesOfFreedom[n];
483 for (int p=p_min; p<=p_max; ++p) {
484 if (k < distribution[p+1]) {
485 mpiRankOfDOF[n]=p;
486 break;
487 }
488 }
489 }
490 }
491
492 int NodeFile::prepareLabeling(const std::vector<short>& mask,
493 std::vector<int>& buffer,
494 std::vector<int>& distribution, bool useNodes)
495 {
496 const int UNSET_ID=-1,SET_ID=1;
497
498 // get the global range of DOF/node ids
499 std::pair<int,int> idRange(useNodes ?
500 getGlobalNodeIDIndexRange() : getGlobalDOFRange());
501 const int* indexArray = (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
502 // distribute the range of node ids
503 distribution.assign(MPIInfo->size+1, 0);
504 int buffer_len=Esys_MPIInfo_setDistribution(MPIInfo, idRange.first,
505 idRange.second, &distribution[0]);
506 const int myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
507
508 // fill buffer by the UNSET_ID marker to check if nodes are defined
509 buffer.assign(buffer_len, UNSET_ID);
510
511 // fill the buffer by sending portions around in a circle
512 #ifdef ESYS_MPI
513 MPI_Status status;
514 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
515 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
516 #endif
517 int buffer_rank=MPIInfo->rank;
518 for (int p=0; p<MPIInfo->size; ++p) {
519 if (p>0) { // the initial send can be skipped
520 #ifdef ESYS_MPI
521 MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_INT, dest,
522 MPIInfo->msg_tag_counter, source, MPIInfo->msg_tag_counter,
523 MPIInfo->comm, &status);
524 #endif
525 MPIInfo->msg_tag_counter++;
526 }
527 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
528 const int id0=distribution[buffer_rank];
529 const int id1=distribution[buffer_rank+1];
530 #pragma omp parallel for
531 for (int n=0; n<numNodes; n++) {
532 if (mask.size()<numNodes || mask[n]>-1) {
533 const int k=indexArray[n];
534 if (id0<=k && k<id1) {
535 buffer[k-id0] = SET_ID;
536 }
537 }
538 }
539 }
540 // count the entries in the buffer
541 // TODO: OMP parallel
542 int myNewCount=0;
543 for (int n=0; n<myCount; ++n) {
544 if (buffer[n] == SET_ID) {
545 buffer[n]=myNewCount;
546 myNewCount++;
547 }
548 }
549 return myNewCount;
550 }
551
552 int NodeFile::createDenseDOFLabeling()
553 {
554 std::vector<int> DOF_buffer;
555 std::vector<int> distribution;
556 std::vector<int> loc_offsets(MPIInfo->size);
557 std::vector<int> offsets(MPIInfo->size);
558 int new_numGlobalDOFs=0;
559
560 // retrieve the number of own DOFs and fill buffer
561 loc_offsets[MPIInfo->rank]=prepareLabeling(std::vector<short>(),
562 DOF_buffer, distribution, false);
563 #ifdef ESYS_MPI
564 MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_INT,
565 MPI_SUM, MPIInfo->comm);
566 for (int n=0; n<MPIInfo->size; ++n) {
567 loc_offsets[n]=new_numGlobalDOFs;
568 new_numGlobalDOFs+=offsets[n];
569 }
570 #else
571 new_numGlobalDOFs=loc_offsets[0];
572 loc_offsets[0]=0;
573 #endif
574
575 const int myDOFs=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
576 #pragma omp parallel for
577 for (int n=0; n<myDOFs; ++n)
578 DOF_buffer[n]+=loc_offsets[MPIInfo->rank];
579
580 std::vector<bool_t> set_new_DOF(numNodes, TRUE);
581
582 // now entries are collected from the buffer again by sending them around
583 // in a circle
584 #ifdef ESYS_MPI
585 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
586 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
587 #endif
588 int buffer_rank=MPIInfo->rank;
589 for (int p=0; p<MPIInfo->size; ++p) {
590 const int dof0=distribution[buffer_rank];
591 const int dof1=distribution[buffer_rank+1];
592 #pragma omp parallel for
593 for (int n=0; n<numNodes; n++) {
594 const int k=globalDegreesOfFreedom[n];
595 if (set_new_DOF[n] && dof0<=k && k<dof1) {
596 globalDegreesOfFreedom[n]=DOF_buffer[k-dof0];
597 set_new_DOF[n]=FALSE;
598 }
599 }
600 if (p<MPIInfo->size-1) { // the last send can be skipped
601 #ifdef ESYS_MPI
602 MPI_Status status;
603 MPI_Sendrecv_replace(&DOF_buffer[0], DOF_buffer.size(), MPI_INT,
604 dest, MPIInfo->msg_tag_counter, source,
605 MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
606 #endif
607 MPIInfo->msg_tag_counter+=1;
608 }
609 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
610 }
611
612 return new_numGlobalDOFs;
613 }
614
615 int NodeFile::createDenseNodeLabeling(std::vector<int>& nodeDistribution,
616 const std::vector<int>& dofDistribution)
617 {
618 const int UNSET_ID=-1, SET_ID=1;
619 const int myFirstDOF=dofDistribution[MPIInfo->rank];
620 const int myLastDOF=dofDistribution[MPIInfo->rank+1];
621
622 // find the range of node ids controlled by me
623 int min_id=std::numeric_limits<int>::max();
624 int max_id=std::numeric_limits<int>::min();
625 #pragma omp parallel
626 {
627 int loc_max_id=max_id;
628 int loc_min_id=min_id;
629 #pragma omp for
630 for (int n=0; n<numNodes; n++) {
631 const int dof=globalDegreesOfFreedom[n];
632 if (myFirstDOF<=dof && dof<myLastDOF) {
633 loc_max_id=std::max(loc_max_id, Id[n]);
634 loc_min_id=std::min(loc_min_id, Id[n]);
635 }
636 }
637 #pragma omp critical
638 {
639 max_id=std::max(loc_max_id, max_id);
640 min_id=std::min(loc_min_id, min_id);
641 }
642 }
643 int my_buffer_len = (max_id>=min_id ? max_id-min_id+1 : 0);
644 int buffer_len;
645
646 #ifdef ESYS_MPI
647 MPI_Allreduce(&my_buffer_len, &buffer_len, 1, MPI_INT, MPI_MAX,
648 MPIInfo->comm);
649 #else
650 buffer_len=my_buffer_len;
651 #endif
652
653 const int header_len=2;
654 std::vector<int> Node_buffer(buffer_len+header_len, UNSET_ID);
655 // extra storage for these IDs
656 Node_buffer[0]=min_id;
657 Node_buffer[1]=max_id;
658
659 // mark and count the nodes in use
660 #pragma omp parallel for
661 for (int n=0; n<numNodes; n++) {
662 globalNodesIndex[n]=-1;
663 const int dof=globalDegreesOfFreedom[n];
664 if (myFirstDOF<=dof && dof<myLastDOF)
665 Node_buffer[Id[n]-min_id+header_len]=SET_ID;
666 }
667 int myNewNumNodes=0;
668 for (int n=0; n<my_buffer_len; n++) {
669 if (Node_buffer[header_len+n]==SET_ID) {
670 Node_buffer[header_len+n]=myNewNumNodes;
671 myNewNumNodes++;
672 }
673 }
674 // make the local number of nodes globally available
675 #ifdef ESYS_MPI
676 MPI_Allgather(&myNewNumNodes, 1, MPI_INT, &nodeDistribution[0], 1, MPI_INT,
677 MPIInfo->comm);
678 #else
679 nodeDistribution[0]=myNewNumNodes;
680 #endif
681
682 int globalNumNodes=0;
683 for (int p=0; p<MPIInfo->size; ++p) {
684 const int itmp=nodeDistribution[p];
685 nodeDistribution[p]=globalNumNodes;
686 globalNumNodes+=itmp;
687 }
688 nodeDistribution[MPIInfo->size]=globalNumNodes;
689
690 // offset node buffer
691 #pragma omp parallel for
692 for (int n=0; n<my_buffer_len; n++)
693 Node_buffer[n+header_len]+=nodeDistribution[MPIInfo->rank];
694
695 // now we send this buffer around to assign global node index
696 #ifdef ESYS_MPI
697 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
698 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
699 #endif
700 int buffer_rank=MPIInfo->rank;
701 for (int p=0; p<MPIInfo->size; ++p) {
702 const int nodeID_0=Node_buffer[0];
703 const int nodeID_1=Node_buffer[1];
704 const int dof0=dofDistribution[buffer_rank];
705 const int dof1=dofDistribution[buffer_rank+1];
706 if (nodeID_0 <= nodeID_1) {
707 #pragma omp parallel for
708 for (int n=0; n<numNodes; n++) {
709 const int dof=globalDegreesOfFreedom[n];
710 const int id=Id[n]-nodeID_0;
711 if (dof0<=dof && dof<dof1 && id>=0 && id<=nodeID_1-nodeID_0)
712 globalNodesIndex[n]=Node_buffer[id+header_len];
713 }
714 }
715 if (p<MPIInfo->size-1) { // the last send can be skipped
716 #ifdef ESYS_MPI
717 MPI_Status status;
718 MPI_Sendrecv_replace(&Node_buffer[0], Node_buffer.size(), MPI_INT,
719 dest, MPIInfo->msg_tag_counter, source,
720 MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
721 #endif
722 MPIInfo->msg_tag_counter+=1;
723 }
724 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
725 }
726 return globalNumNodes;
727 }
728
729 int NodeFile::createDenseReducedLabeling(const std::vector<short>& reducedMask,
730 bool useNodes)
731 {
732 std::vector<int> buffer;
733 std::vector<int> distribution;
734 std::vector<int> loc_offsets(MPIInfo->size);
735 std::vector<int> offsets(MPIInfo->size);
736 int new_numGlobalReduced=0;
737
738 // retrieve the number of own DOFs/nodes and fill buffer
739 loc_offsets[MPIInfo->rank]=prepareLabeling(reducedMask, buffer,
740 distribution, useNodes);
741 #ifdef ESYS_MPI
742 MPI_Allreduce(&loc_offsets[0], &offsets[0], MPIInfo->size, MPI_INT,
743 MPI_SUM, MPIInfo->comm);
744 for (int n=0; n<MPIInfo->size; ++n) {
745 loc_offsets[n]=new_numGlobalReduced;
746 new_numGlobalReduced+=offsets[n];
747 }
748 #else
749 new_numGlobalReduced=loc_offsets[0];
750 loc_offsets[0]=0;
751 #endif
752
753 const int myCount=distribution[MPIInfo->rank+1]-distribution[MPIInfo->rank];
754 #pragma omp parallel for
755 for (int n=0; n<myCount; ++n)
756 buffer[n]+=loc_offsets[MPIInfo->rank];
757
758 const int* denseArray =
759 (useNodes ? globalNodesIndex : globalDegreesOfFreedom);
760 int* reducedArray =
761 (useNodes ? globalReducedNodesIndex : globalReducedDOFIndex);
762
763 #pragma omp parallel for
764 for (int n=0; n<numNodes; ++n)
765 reducedArray[n]=loc_offsets[0]-1;
766
767 // now entries are collected from the buffer by sending them around
768 // in a circle
769 #ifdef ESYS_MPI
770 int dest=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank + 1);
771 int source=Esys_MPIInfo_mod(MPIInfo->size, MPIInfo->rank - 1);
772 #endif
773 int buffer_rank=MPIInfo->rank;
774 for (int p=0; p<MPIInfo->size; ++p) {
775 const int id0=distribution[buffer_rank];
776 const int id1=distribution[buffer_rank+1];
777 #pragma omp parallel for
778 for (int n=0; n<numNodes; n++) {
779 if (reducedMask[n] > -1) {
780 const int k=denseArray[n];
781 if (id0<=k && k<id1)
782 reducedArray[n]=buffer[k-id0];
783 }
784 }
785 if (p<MPIInfo->size-1) { // the last send can be skipped
786 #ifdef ESYS_MPI
787 MPI_Status status;
788 MPI_Sendrecv_replace(&buffer[0], buffer.size(), MPI_INT, dest,
789 MPIInfo->msg_tag_counter, source,
790 MPIInfo->msg_tag_counter, MPIInfo->comm, &status);
791 #endif
792 MPIInfo->msg_tag_counter+=1;
793 }
794 buffer_rank=Esys_MPIInfo_mod(MPIInfo->size, buffer_rank-1);
795 }
796 return new_numGlobalReduced;
797 }
798
799 void NodeFile::createDOFMappingAndCoupling(bool use_reduced_elements)
800 {
801 Paso_Distribution* dof_distribution;
802 const int* globalDOFIndex;
803 if (use_reduced_elements) {
804 dof_distribution=reducedDegreesOfFreedomDistribution;
805 globalDOFIndex=globalReducedDOFIndex;
806 } else {
807 dof_distribution=degreesOfFreedomDistribution;
808 globalDOFIndex=globalDegreesOfFreedom;
809 }
810 const int myFirstDOF=Paso_Distribution_getFirstComponent(dof_distribution);
811 const int myLastDOF=Paso_Distribution_getLastComponent(dof_distribution);
812 const int mpiSize=MPIInfo->size;
813 const int myRank=MPIInfo->rank;
814
815 int min_DOF, max_DOF;
816 std::pair<int,int> DOF_range(util::getFlaggedMinMaxInt(
817 numNodes, globalDOFIndex, -1));
818
819 if (DOF_range.second < DOF_range.first) {
820 min_DOF=myFirstDOF;
821 max_DOF=myLastDOF-1;
822 } else {
823 min_DOF=DOF_range.first;
824 max_DOF=DOF_range.second;
825 }
826
827 int p_min=mpiSize;
828 int p_max=-1;
829 if (max_DOF >= min_DOF) {
830 for (int p=0; p<mpiSize; ++p) {
831 if (dof_distribution->first_component[p]<=min_DOF) p_min=p;
832 if (dof_distribution->first_component[p]<=max_DOF) p_max=p;
833 }
834 }
835
836 if (!((min_DOF<=myFirstDOF) && (myLastDOF-1<=max_DOF))) {
837 setError(SYSTEM_ERROR, "Local elements do not span local degrees of freedom.");
838 return;
839 }
840 const int UNUSED = -1;
841 const int len_loc_dof=max_DOF-min_DOF+1;
842 std::vector<int> shared(numNodes*(p_max-p_min+1));
843 std::vector<int> offsetInShared(mpiSize+1);
844 std::vector<int> locDOFMask(len_loc_dof, UNUSED);
845
846 #pragma omp parallel
847 {
848 #pragma omp for
849 for (int i=0;i<numNodes;++i) {
850 const int k=globalDOFIndex[i];
851 if (k > -1) {
852 #ifdef BOUNDS_CHECK
853 if ((k-min_DOF)>=len_loc_dof) {
854 printf("BOUNDS_CHECK %s %d i=%d k=%d min_DOF=%d\n", __FILE__, __LINE__, i, k, min_DOF);
855 exit(1);
856 }
857 #endif
858 locDOFMask[k-min_DOF]=UNUSED-1;
859 }
860 }
861 #ifdef BOUNDS_CHECK
862 if (myLastDOF-min_DOF > len_loc_dof) {
863 printf("BOUNDS_CHECK %s %d\n", __FILE__, __LINE__);
864 exit(1);
865 }
866 #endif
867 #pragma omp for
868 for (int i=myFirstDOF-min_DOF; i<myLastDOF-min_DOF; ++i) {
869 locDOFMask[i]=i-myFirstDOF+min_DOF;
870 }
871 }
872
873 std::vector<int> wanted_DOFs(numNodes);
874 std::vector<int> rcv_len(mpiSize);
875 std::vector<int> snd_len(mpiSize);
876 std::vector<int> neighbor(mpiSize);
877 int numNeighbors=0;
878 int n=0;
879 int lastn=n;
880 for (int p=p_min; p<=p_max; ++p) {
881 if (p != myRank) {
882 const int firstDOF=std::max(min_DOF, dof_distribution->first_component[p]);
883 const int lastDOF=std::min(max_DOF+1, dof_distribution->first_component[p+1]);
884 #ifdef BOUNDS_CHECK
885 if (firstDOF-min_DOF<0 || lastDOF-min_DOF>len_loc_dof) {
886 printf("BOUNDS_CHECK %s %d p=%d\n", __FILE__, __LINE__, p);
887 exit(1);
888 }
889 #endif
890 for (int i=firstDOF-min_DOF; i<lastDOF-min_DOF; ++i) {
891 if (locDOFMask[i] == UNUSED-1) {
892 locDOFMask[i]=myLastDOF-myFirstDOF+n;
893 wanted_DOFs[n]=i+min_DOF;
894 ++n;
895 }
896 }
897 if (n > lastn) {
898 rcv_len[p]=n-lastn;
899 #ifdef BOUNDS_CHECK
900 if (numNeighbors >= mpiSize+1) {
901 printf("BOUNDS_CHECK %s %d p=%d numNeighbors=%d n=%d\n", __FILE__, __LINE__, p, numNeighbors, n);
902 exit(1);
903 }
904 #endif
905 neighbor[numNeighbors]=p;
906 offsetInShared[numNeighbors]=lastn;
907 numNeighbors++;
908 lastn=n;
909 }
910 } // if p!=myRank
911 } // for p
912
913 #ifdef BOUNDS_CHECK
914 if (numNeighbors >= mpiSize+1) {
915 printf("BOUNDS_CHECK %s %d numNeighbors=%d\n", __FILE__, __LINE__, numNeighbors);
916 exit(1);
917 }
918 #endif
919 offsetInShared[numNeighbors]=lastn;
920
921 // assign new DOF labels to nodes
922 std::vector<int> nodeMask(numNodes, UNUSED);
923 #pragma omp parallel for
924 for (int i=0; i<numNodes; ++i) {
925 const int k=globalDOFIndex[i];
926 if (k > -1)
927 nodeMask[i]=locDOFMask[k-min_DOF];
928 }
929
930 // now we can set the mapping from nodes to local DOFs
931 if (use_reduced_elements) {
932 reducedDegreesOfFreedomMapping.assign(nodeMask, UNUSED);
933 } else {
934 degreesOfFreedomMapping.assign(nodeMask, UNUSED);
935 }
936
937 // define how to get the DOF values that are controlled by other processors
938 #ifdef BOUNDS_CHECK
939 if (offsetInShared[numNeighbors] >= numNodes*(p_max-p_min+1)) {
940 printf("BOUNDS_CHECK %s %d\n", __FILE__, __LINE__);
941 exit(1);
942 }
943 #endif
944 #pragma omp parallel for
945 for (int i=0; i<offsetInShared[numNeighbors]; ++i)
946 shared[i]=myLastDOF-myFirstDOF+i;
947
948 Paso_SharedComponents *rcv_shcomp=Paso_SharedComponents_alloc(
949 myLastDOF-myFirstDOF, numNeighbors, &neighbor[0], &shared[0],
950 &offsetInShared[0], 1, 0, MPIInfo);
951
952 /////////////////////////////////
953 // now we build the sender //
954 /////////////////////////////////
955 #ifdef ESYS_MPI
956 std::vector<MPI_Request> mpi_requests(mpiSize*2);
957 std::vector<MPI_Status> mpi_stati(mpiSize*2);
958 MPI_Alltoall(&rcv_len[0], 1, MPI_INT, &snd_len[0], 1, MPI_INT, MPIInfo->comm);
959 int count=0;
960 #else
961 snd_len[0]=rcv_len[0];
962 #endif
963
964 for (int p=0; p<rcv_shcomp->numNeighbors; p++) {
965 #ifdef ESYS_MPI
966 MPI_Isend(&(wanted_DOFs[rcv_shcomp->offsetInShared[p]]),
967 rcv_shcomp->offsetInShared[p+1]-rcv_shcomp->offsetInShared[p],
968 MPI_INT, rcv_shcomp->neighbor[p],
969 MPIInfo->msg_tag_counter+myRank, MPIInfo->comm,
970 &mpi_requests[count]);
971 count++;
972 #endif
973 }
974 n=0;
975 numNeighbors=0;
976 for (int p=0; p<mpiSize; p++) {
977 if (snd_len[p] > 0) {
978 #ifdef ESYS_MPI
979 MPI_Irecv(&shared[n], snd_len[p], MPI_INT, p,
980 MPIInfo->msg_tag_counter+p, MPIInfo->comm,
981 &mpi_requests[count]);
982 count++;
983 #endif
984 neighbor[numNeighbors]=p;
985 offsetInShared[numNeighbors]=n;
986 numNeighbors++;
987 n+=snd_len[p];
988 }
989 }
990 MPIInfo->msg_tag_counter+=MPIInfo->size;
991 offsetInShared[numNeighbors]=n;
992 #ifdef ESYS_MPI
993 MPI_Waitall(count, &mpi_requests[0], &mpi_stati[0]);
994 #endif
995 // map global ids to local ids
996 #pragma omp parallel for
997 for (int i=0; i<offsetInShared[numNeighbors]; ++i) {
998 shared[i]=locDOFMask[shared[i]-min_DOF];
999 }
1000
1001 Paso_SharedComponents* snd_shcomp=Paso_SharedComponents_alloc(
1002 myLastDOF-myFirstDOF, numNeighbors, &neighbor[0], &shared[0],
1003 &offsetInShared[0], 1, 0, MPIInfo);
1004
1005 if (noError()) {
1006 if (use_reduced_elements) {
1007 reducedDegreesOfFreedomConnector=Paso_Connector_alloc(snd_shcomp, rcv_shcomp);
1008 } else {
1009 degreesOfFreedomConnector=Paso_Connector_alloc(snd_shcomp, rcv_shcomp);
1010 }
1011 }
1012
1013 Paso_SharedComponents_free(rcv_shcomp);
1014 Paso_SharedComponents_free(snd_shcomp);
1015 }
1016
1017 void NodeFile::createNodeMappings(const std::vector<int>& indexReducedNodes,
1018 const std::vector<int>& dofDist,
1019 const std::vector<int>& nodeDist)
1020 {
1021 const int mpiSize=MPIInfo->size;
1022 const int myRank=MPIInfo->rank;
1023
1024 const int myFirstDOF=dofDist[myRank];
1025 const int myLastDOF=dofDist[myRank+1];
1026 const int myNumDOF=myLastDOF-myFirstDOF;
1027
1028 const int myFirstNode=nodeDist[myRank];
1029 const int myLastNode=nodeDist[myRank+1];
1030 const int myNumNodes=myLastNode-myFirstNode;
1031
1032 std::vector<short> maskMyReducedDOF(myNumDOF, -1);
1033 std::vector<short> maskMyReducedNodes(myNumNodes, -1);
1034
1035 // mark the nodes used by the reduced mesh
1036 #pragma omp parallel for
1037 for (int i=0; i<indexReducedNodes.size(); ++i) {
1038 int k=globalNodesIndex[indexReducedNodes[i]];
1039 if (k>=myFirstNode && myLastNode>k)
1040 maskMyReducedNodes[k-myFirstNode]=1;
1041 k=globalDegreesOfFreedom[indexReducedNodes[i]];
1042 if (k>=myFirstDOF && myLastDOF>k) {
1043 maskMyReducedDOF[k-myFirstDOF]=1;
1044 }
1045 }
1046 std::vector<int> indexMyReducedDOF = util::packMask(maskMyReducedDOF);
1047 int myNumReducedDOF=indexMyReducedDOF.size();
1048 std::vector<int> indexMyReducedNodes = util::packMask(maskMyReducedNodes);
1049 int myNumReducedNodes=indexMyReducedNodes.size();
1050
1051 std::vector<int> rdofDist(mpiSize+1);
1052 std::vector<int> rnodeDist(mpiSize+1);
1053 #ifdef ESYS_MPI
1054 MPI_Allgather(&myNumReducedNodes, 1, MPI_INT, &rnodeDist[0], 1, MPI_INT, MPIInfo->comm);
1055 MPI_Allgather(&myNumReducedDOF, 1, MPI_INT, &rdofDist[0], 1, MPI_INT, MPIInfo->comm);
1056 #else
1057 rnodeDist[0]=myNumReducedNodes;
1058 rdofDist[0]=myNumReducedDOF;
1059 #endif
1060 int globalNumReducedNodes=0;
1061 int globalNumReducedDOF=0;
1062 for (int i=0; i<mpiSize;++i) {
1063 int k=rnodeDist[i];
1064 rnodeDist[i]=globalNumReducedNodes;
1065 globalNumReducedNodes+=k;
1066
1067 k=rdofDist[i];
1068 rdofDist[i]=globalNumReducedDOF;
1069 globalNumReducedDOF+=k;
1070 }
1071 rnodeDist[mpiSize]=globalNumReducedNodes;
1072 rdofDist[mpiSize]=globalNumReducedDOF;
1073
1074 // ==== distribution of Nodes ===============================
1075 nodesDistribution=Paso_Distribution_alloc(MPIInfo, &nodeDist[0], 1, 0);
1076 // ==== distribution of DOFs ================================
1077 degreesOfFreedomDistribution=Paso_Distribution_alloc(MPIInfo, &dofDist[0], 1,0);
1078 // ==== distribution of reduced Nodes =======================
1079 reducedNodesDistribution=Paso_Distribution_alloc(MPIInfo, &rnodeDist[0], 1, 0);
1080 // ==== distribution of reduced DOF =========================
1081 reducedDegreesOfFreedomDistribution=Paso_Distribution_alloc(MPIInfo, &rdofDist[0], 1, 0);
1082
1083 std::vector<int> nodeMask(numNodes);
1084
1085 if (noError()) {
1086 const int UNUSED = -1;
1087 // ==== nodes mapping which is a dummy structure ========
1088 #pragma omp parallel for
1089 for (int i=0; i<numNodes; ++i)
1090 nodeMask[i]=i;
1091 nodesMapping.assign(nodeMask, UNUSED);
1092
1093 // ==== mapping between nodes and reduced nodes ==========
1094 #pragma omp parallel for
1095 for (int i=0; i<numNodes; ++i)
1096 nodeMask[i]=UNUSED;
1097 #pragma omp parallel for
1098 for (int i=0; i<indexReducedNodes.size(); ++i)
1099 nodeMask[indexReducedNodes[i]]=i;
1100 reducedNodesMapping.assign(nodeMask, UNUSED);
1101 }
1102 // ==== mapping between nodes and DOFs + DOF connector
1103 if (noError())
1104 createDOFMappingAndCoupling(false);
1105 // ==== mapping between nodes and reduced DOFs + reduced DOF connector
1106 if (noError())
1107 createDOFMappingAndCoupling(true);
1108
1109 // get the Ids for DOFs and reduced nodes
1110 if (noError()) {
1111 #pragma omp parallel
1112 {
1113 #pragma omp for
1114 for (int i=0; i<reducedNodesMapping.getNumTargets(); ++i)
1115 reducedNodesId[i]=Id[reducedNodesMapping.map[i]];
1116 #pragma omp for
1117 for (int i=0; i<degreesOfFreedomMapping.getNumTargets(); ++i)
1118 degreesOfFreedomId[i]=Id[degreesOfFreedomMapping.map[i]];
1119 #pragma omp for
1120 for (int i=0; i<reducedDegreesOfFreedomMapping.getNumTargets(); ++i)
1121 reducedDegreesOfFreedomId[i]=Id[reducedDegreesOfFreedomMapping.map[i]];
1122 }
1123 } else {
1124 Paso_Distribution_free(nodesDistribution);
1125 Paso_Distribution_free(reducedNodesDistribution);
1126 Paso_Distribution_free(degreesOfFreedomDistribution);
1127 Paso_Distribution_free(reducedDegreesOfFreedomDistribution);
1128 Paso_Connector_free(degreesOfFreedomConnector);
1129 Paso_Connector_free(reducedDegreesOfFreedomConnector);
1130 nodesDistribution=NULL;
1131 reducedNodesDistribution=NULL;
1132 degreesOfFreedomDistribution=NULL;
1133 reducedDegreesOfFreedomDistribution=NULL;
1134 degreesOfFreedomConnector=NULL;
1135 reducedDegreesOfFreedomConnector=NULL;
1136 }
1137 }
1138
1139 } // namespace finley
1140

Properties

svn:eol-style: native
svn:keywords: Author Date Id Revision
svn:mergeinfo: /branches/lapack2681/finley/src/NodeFile.cpp:2682-2741 /branches/pasowrap/finley/src/NodeFile.cpp:3661-3674 /branches/py3_attempt2/finley/src/NodeFile.cpp:3871-3891 /branches/restext/finley/src/NodeFile.cpp:2610-2624 /branches/ripleygmg_from_3668/finley/src/NodeFile.cpp:3669-3791 /branches/stage3.0/finley/src/NodeFile.cpp:2569-2590 /branches/symbolic_from_3470/finley/src/NodeFile.cpp:3471-3974 /release/3.0/finley/src/NodeFile.cpp:2591-2601 /trunk/finley/src/NodeFile.cpp:4257-4344
