/[escript]/trunk/finley/src/Mesh_read.cpp

Contents of /trunk/finley/src/Mesh_read.cpp



Revision 4496
Mon Jul 15 06:53:44 2013 UTC by caltinay
File size: 30210 byte(s)
finley (WIP):
-moved all of finley into its namespace
-introduced some shared pointers
-Mesh is now a class
-other bits and pieces...

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2013 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15
16
17 /****************************************************************************
18
19 Finley: read mesh from file
20
21 *****************************************************************************/
22
23 #include <ctype.h>
24 #include "Mesh.h"
25
26 namespace finley {
27
28 #define FSCANF_CHECK(scan_ret, reason) {\
29 if (scan_ret == EOF) {\
30 perror(reason);\
31 setError(IO_ERROR,"scan error while reading finley file");\
32 return NULL;\
33 }\
34 }
35
36
37 Mesh* Mesh::read(const std::string fname, int order, int reduced_order,
38 bool optimize)
39 {
40 int numNodes, numDim, numEle, i0, i1;
41 const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
42 char name[LenString_MAX],element_type[LenString_MAX],frm[20];
43 char error_msg[LenErrorMsg_MAX];
44 FILE *fileHandle_p = NULL;
45 ElementTypeId typeID=NoRef;
46 int scan_ret;
47
48 resetError();
49 Esys_MPIInfo *mpi_info = Esys_MPIInfo_alloc(MPI_COMM_WORLD);
50
51 if (mpi_info->rank == 0) {
52 // get file handle
53 fileHandle_p = fopen(fname.c_str(), "r");
54 if (fileHandle_p==NULL) {
55 sprintf(error_msg,"Mesh_read: Opening file %s for reading failed.",fname.c_str());
56 setError(IO_ERROR,error_msg);
57 Esys_MPIInfo_free( mpi_info );
58 return NULL;
59 }
60
61 // read header
62 sprintf(frm,"%%%d[^\n]",LenString_MAX-1);
63 scan_ret = fscanf(fileHandle_p, frm, name);
64 FSCANF_CHECK(scan_ret, "Mesh::read")
65
66 /* get the number of nodes */
67 scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim,&numNodes);
68 FSCANF_CHECK(scan_ret, "Mesh::read")
69 }
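/* Illustrative sketch (editorial addition, not part of this revision): the two
   reads above expect a free-form name line followed by a line matching
   "%1d%*s %d", for example (assumed sample input):

       my_mesh
       2D-Nodes 4

   which yields numDim=2 and numNodes=4. */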
70
71 #ifdef ESYS_MPI
72 // Broadcast numDim, numNodes, name if there are multiple MPI procs
73 if (mpi_info->size > 1) {
74 int temp1[3];
75 if (mpi_info->rank == 0) {
76 temp1[0] = numDim;
77 temp1[1] = numNodes;
78 temp1[2] = strlen(name) + 1;
79 } else {
80 temp1[0] = 0;
81 temp1[1] = 0;
82 temp1[2] = 1;
83 }
84 MPI_Bcast(temp1, 3, MPI_INT, 0, mpi_info->comm);
85 numDim = temp1[0];
86 numNodes = temp1[1];
87 MPI_Bcast(name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
88 }
89 #endif
90
91 /* allocate mesh */
92 Mesh* mesh_p = new Mesh(name, numDim, mpi_info);
93
94 if (noError()) {
95 /* Each CPU will get at most chunkSize nodes so the message has to be sufficiently large */
96 int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0, nextCPU=1;
97 int *tempInts = new int[chunkSize*3+1]; /* Stores the integer message data */
98 double *tempCoords = new double[chunkSize*numDim]; /* Stores the double message data */
99
100 /*
101 Read chunkSize nodes at a time and send each chunk to a worker CPU, which copies it into its local mesh_p.
102 It does not matter if a CPU ends up with the wrong nodes for its elements; this is sorted out later.
103 The first chunk is sent to CPU 1, the second to CPU 2, ...
104 The last chunk stays on CPU 0 (the master).
105 The three columns of integers (Id, gDOF, Tag) are gathered into a single array tempInts and sent together in one MPI message (see the layout sketch below).
106 */
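/* Layout sketch (editorial addition, not part of this revision): with the
   indexing used below, tempInts holds three contiguous columns of length
   chunkSize plus one trailing count, and tempCoords holds numDim doubles
   per node:

       tempInts   = [ Id_0..Id_{c-1} | gDOF_0..gDOF_{c-1} | Tag_0..Tag_{c-1} | chunkNodes ]   (c = chunkSize)
       tempCoords = [ x_0 y_0 (z_0)  x_1 y_1 (z_1)  ... ]
*/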
107
108 if (mpi_info->rank == 0) { /* Master */
109 for (;;) { /* Infinite loop */
110 #pragma omp parallel for private (i0) schedule(static)
111 for (i0=0; i0<chunkSize*3+1; i0++) tempInts[i0] = -1;
112
113 #pragma omp parallel for private (i0) schedule(static)
114 for (i0=0; i0<chunkSize*numDim; i0++) tempCoords[i0] = -1.0;
115
116 chunkNodes = 0;
117 for (i1=0; i1<chunkSize; i1++) {
118 if (totalNodes >= numNodes) break; /* End of inner loop */
119 if (1 == numDim) {
120 scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
121 &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
122 &tempCoords[i1*numDim+0]);
123 FSCANF_CHECK(scan_ret, "Mesh_read")
124 }
125 if (2 == numDim) {
126 scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
127 &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
128 &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1]);
129 FSCANF_CHECK(scan_ret, "Mesh_read")
130 }
131 if (3 == numDim) {
132 scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
133 &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
134 &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1], &tempCoords[i1*numDim+2]);
135 FSCANF_CHECK(scan_ret, "Mesh_read")
136 }
137 totalNodes++; /* When do we quit the infinite loop? */
138 chunkNodes++; /* How many nodes do we actually have in this chunk? It may be smaller than chunkSize. */
139 }
140 if (chunkNodes > chunkSize) {
141 setError(ESYS_MPI_ERROR, "Mesh_read: error reading chunks of mesh, data too large for message size");
142 return NULL;
143 }
144 #ifdef ESYS_MPI
145 /* Eventually we'll send chunkSize nodes to each CPU numbered 1 ... mpi_info->size-1; here goes one of them */
146 if (nextCPU < mpi_info->size) {
147 tempInts[chunkSize*3] = chunkNodes; /* The message has one more int to send chunkNodes */
148 MPI_Send(tempInts, chunkSize*3+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
149 MPI_Send(tempCoords, chunkSize*numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
150 }
151 #endif
152 nextCPU++;
153 /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
154 if (nextCPU > mpi_info->size) break; /* End infinite loop */
155 } /* Infinite loop */
156 } /* End master */
157 else { /* Worker */
158 #ifdef ESYS_MPI
159 /* Each worker receives two messages */
160 MPI_Status status;
161 MPI_Recv(tempInts, chunkSize*3+1, MPI_INT, 0, 81720, mpi_info->comm, &status);
162 MPI_Recv(tempCoords, chunkSize*numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
163 chunkNodes = tempInts[chunkSize*3]; /* How many nodes are in this worker's chunk? */
164 #endif
165 } /* Worker */
166
167 /* Copy node data from the temporary buffers into mesh_p */
168 mesh_p->Nodes->allocTable(chunkNodes);
169
170 if (noError()) {
171 #pragma omp parallel for private (i0, i1) schedule(static)
172 for (i0=0; i0<chunkNodes; i0++) {
173 mesh_p->Nodes->Id[i0] = tempInts[0+i0];
174 mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize+i0];
175 mesh_p->Nodes->Tag[i0] = tempInts[chunkSize*2+i0];
176 for (i1=0; i1<numDim; i1++) {
177 mesh_p->Nodes->Coordinates[INDEX2(i1,i0,numDim)] = tempCoords[i0*numDim+i1];
178 }
179 }
180 }
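/* Note (editorial addition): INDEX2(i1,i0,numDim) is assumed to expand to
   i1 + numDim*i0, so the numDim coordinates of node i0 end up contiguous in
   mesh_p->Nodes->Coordinates, matching the per-node layout of tempCoords. */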
181 delete[] tempInts;
182 delete[] tempCoords;
183 }
184
185 /* *********************************** read elements ****************************************************************************************/
186 if (noError()) {
187
188 /* Read the element typeID */
189 if (mpi_info->rank == 0) {
190 scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
191 FSCANF_CHECK(scan_ret, "Mesh_read")
192 typeID=ReferenceElement::getTypeId(element_type);
193 }
194 #ifdef ESYS_MPI
195 if (mpi_info->size > 1) {
196 int temp1[2], mpi_error;
197 temp1[0] = (int) typeID;
198 temp1[1] = numEle;
199 mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
200 if (mpi_error != MPI_SUCCESS) {
201 setError(ESYS_MPI_ERROR, "Mesh_read: broadcast of Element typeID failed");
202 return NULL;
203 }
204 typeID = (ElementTypeId) temp1[0];
205 numEle = temp1[1];
206 }
207 #endif
208 if (typeID==NoRef) {
209 sprintf(error_msg, "Mesh_read: Unidentified element type %s", element_type);
210 setError(VALUE_ERROR, error_msg);
211 }
212 }
213
214 /* Allocate the ElementFile */
215 if (noError()) {
216 refElements.reset(new ReferenceElementSet(typeID, order, reduced_order));
217 mesh_p->Elements=new ElementFile(refElements, mpi_info);
218 // new meaning for numNodes: num nodes per element
219 numNodes = mesh_p->Elements->numNodes;
220 }
221
222 /********************** Read the element data ***************************/
223 if (noError()) {
224 int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
225 int *tempInts = new int[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
226 /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
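/* Layout sketch (editorial addition): each element occupies 2+numNodes
   consecutive ints and the final slot carries the element count:

       tempInts = [ Id_0 Tag_0 n_0..n_{numNodes-1} | Id_1 Tag_1 ... | chunkEle ]

   The same layout is reused below for face, contact and point elements. */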
227 if (mpi_info->rank == 0) { /* Master */
228 for (;;) { /* Infinite loop */
229 #pragma omp parallel for private (i0) schedule(static)
230 for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
231
232 chunkEle = 0;
233 for (i0=0; i0<chunkSize; i0++) {
234 if (totalEle >= numEle) break; /* End inner loop */
235 scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
236 FSCANF_CHECK(scan_ret, "Mesh_read")
237 for (i1 = 0; i1 < numNodes; i1++) {
238 scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
239 FSCANF_CHECK(scan_ret, "Mesh_read")
240 }
241 scan_ret = fscanf(fileHandle_p, "\n");
242 FSCANF_CHECK(scan_ret, "Mesh_read")
243 totalEle++;
244 chunkEle++;
245 }
246 #ifdef ESYS_MPI
247 /* Eventually we'll send a chunk of elements to each CPU except 0 itself; here goes one of them */
248 if (nextCPU < mpi_info->size) {
249 tempInts[chunkSize*(2+numNodes)] = chunkEle;
250 MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81722, mpi_info->comm);
251 }
252 #endif
253 nextCPU++;
254 /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
255 if (nextCPU > mpi_info->size) break; /* End infinite loop */
256 } /* Infinite loop */
257 } /* End master */
258 else { /* Worker */
259 #ifdef ESYS_MPI
260 /* Each worker receives one message */
261 MPI_Status status;
262 MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81722, mpi_info->comm, &status);
263 chunkEle = tempInts[chunkSize*(2+numNodes)];
264 #endif
265 } /* Worker */
266
267 mesh_p->Elements->allocTable(chunkEle);
268
269 /* Copy Element data from tempInts to mesh_p */
270 if (noError()) {
271
272 mesh_p->Elements->minColor=0;
273 mesh_p->Elements->maxColor=chunkEle-1;
274 #pragma omp parallel for private (i0, i1) schedule(static)
275 for (i0=0; i0<chunkEle; i0++) {
276 mesh_p->Elements->Id[i0] = tempInts[i0*(2+numNodes)+0];
277 mesh_p->Elements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
278 mesh_p->Elements->Owner[i0] =mpi_info->rank;
279 mesh_p->Elements->Color[i0] = i0;
280 for (i1 = 0; i1 < numNodes; i1++) {
281 mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
282 }
283 }
284 }
285
286 delete[] tempInts;
287 }
288 /******************** end of Read the element data **********************/
289
290 /********************** read face elements ******************************/
291 if (noError()) {
292 /* Read the element typeID */
293
294 if (mpi_info->rank == 0) {
295 scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
296 FSCANF_CHECK(scan_ret, "Mesh_read")
297 typeID=ReferenceElement::getTypeId(element_type);
298 }
299 #ifdef ESYS_MPI
300 if (mpi_info->size > 1) {
301 int temp1[2];
302 temp1[0] = (int) typeID;
303 temp1[1] = numEle;
304 MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
305 typeID = (ElementTypeId) temp1[0];
306 numEle = temp1[1];
307 }
308 #endif
309 if (typeID==NoRef) {
310 sprintf(error_msg, "Mesh_read: Unidentified element type %s", element_type);
311 setError(VALUE_ERROR, error_msg);
312 }
313 if (noError()) {
314 /* Allocate the ElementFile */
315 refFaceElements.reset(new ReferenceElementSet(typeID, order, reduced_order));
316 mesh_p->FaceElements=new ElementFile(refFaceElements, mpi_info);
317 numNodes = mesh_p->FaceElements->numNodes; // new meaning for numNodes: num nodes per element
318 }
319
320 }
321
322 /*********************** Read the face element data *********************/
323
324 if (noError()) {
325 int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
326 int *tempInts = new int[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
327 /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
328 if (mpi_info->rank == 0) { /* Master */
329 for (;;) { /* Infinite loop */
330 #pragma omp parallel for private (i0) schedule(static)
331 for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
332
333 chunkEle = 0;
334 for (i0=0; i0<chunkSize; i0++) {
335 if (totalEle >= numEle) break; /* End inner loop */
336 scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
337 FSCANF_CHECK(scan_ret, "Mesh_read")
338 for (i1 = 0; i1 < numNodes; i1++) {
339 scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
340 FSCANF_CHECK(scan_ret, "Mesh_read")
341 }
342 scan_ret = fscanf(fileHandle_p, "\n");
343 FSCANF_CHECK(scan_ret, "Mesh_read")
344 totalEle++;
345 chunkEle++;
346 }
347 #ifdef ESYS_MPI
348 /* Eventually we'll send a chunk of elements to each CPU except 0 itself; here goes one of them */
349 if (nextCPU < mpi_info->size) {
350 tempInts[chunkSize*(2+numNodes)] = chunkEle;
351 MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81723, mpi_info->comm);
352 }
353 #endif
354 nextCPU++;
355 /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
356 if (nextCPU > mpi_info->size) break; /* End infinite loop */
357 } /* Infinite loop */
358 } /* End master */
359 else { /* Worker */
360 #ifdef ESYS_MPI
361 /* Each worker receives one message */
362 MPI_Status status;
363 MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81723, mpi_info->comm, &status);
364 chunkEle = tempInts[chunkSize*(2+numNodes)];
365 #endif
366 } /* Worker */
367
368 mesh_p->FaceElements->allocTable(chunkEle);
369
370 if (noError()) {
371 /* Copy Element data from tempInts to mesh_p */
372
373 mesh_p->FaceElements->minColor=0;
374 mesh_p->FaceElements->maxColor=chunkEle-1;
375 #pragma omp parallel for private (i0, i1)
376 for (i0=0; i0<chunkEle; i0++) {
377 mesh_p->FaceElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
378 mesh_p->FaceElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
379 mesh_p->FaceElements->Owner[i0] =mpi_info->rank;
380 mesh_p->FaceElements->Color[i0] = i0;
381 for (i1 = 0; i1 < numNodes; i1++) {
382 mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
383 }
384 }
385 }
386
387 delete[] tempInts;
388 }
389 /******************* end of Read the face element data ******************/
390
391
392 /************************* read contact elements ************************/
393
394 /* Read the element typeID */
395 if (noError()) {
396 if (mpi_info->rank == 0) {
397 scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
398 FSCANF_CHECK(scan_ret, "Mesh_read")
399 typeID=ReferenceElement::getTypeId(element_type);
400 }
401 #ifdef ESYS_MPI
402 if (mpi_info->size > 1) {
403 int temp1[2];
404 temp1[0] = (int) typeID;
405 temp1[1] = numEle;
406 MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
407 typeID = (ElementTypeId) temp1[0];
408 numEle = temp1[1];
409 }
410 #endif
411 if (typeID==NoRef) {
412 sprintf(error_msg, "Mesh_read: Unidentified element type %s", element_type);
413 setError(VALUE_ERROR, error_msg);
414 }
415 }
416
417 if (noError()) {
418 /* Allocate the ElementFile */
419 refContactElements.reset(new ReferenceElementSet(typeID, order, reduced_order));
420 mesh_p->ContactElements=new ElementFile(refContactElements, mpi_info);
421 numNodes = mesh_p->ContactElements->numNodes; // new meaning for numNodes: num nodes per element
422 }
423 /******************* Read the contact element data **********************/
424 if (noError()) {
425 int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
426 int *tempInts = new int[chunkSize*(2+numNodes)+1]; /* Store Id + Tag + node list (+ one int at end for chunkEle) */
427 /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
428 if (mpi_info->rank == 0) { /* Master */
429 for (;;) { /* Infinite loop */
430 #pragma omp parallel for private (i0) schedule(static)
431 for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
432
433 chunkEle = 0;
434 for (i0=0; i0<chunkSize; i0++) {
435 if (totalEle >= numEle) break; /* End inner loop */
436 scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
437 FSCANF_CHECK(scan_ret, "Mesh_read")
438 for (i1 = 0; i1 < numNodes; i1++) {
439 scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
440 FSCANF_CHECK(scan_ret, "Mesh_read")
441 }
442 scan_ret = fscanf(fileHandle_p, "\n");
443 FSCANF_CHECK(scan_ret, "Mesh_read")
444 totalEle++;
445 chunkEle++;
446 }
447 #ifdef ESYS_MPI
448 // Eventually we'll send a chunk of elements to each CPU except
449 // 0 itself; here goes one of them
450 if (nextCPU < mpi_info->size) {
451 tempInts[chunkSize*(2+numNodes)] = chunkEle;
452 MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81724, mpi_info->comm);
453 }
454 #endif
455 nextCPU++;
456 // Infinite loop ends when I've read a chunk for each of the
457 // worker nodes plus one more chunk for the master
458 if (nextCPU > mpi_info->size)
459 break; // End infinite loop
460 } // Infinite loop
461 } // End master
462 else { // Worker
463 #ifdef ESYS_MPI
464 // Each worker receives one message
465 MPI_Status status;
466 MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81724, mpi_info->comm, &status);
467 chunkEle = tempInts[chunkSize*(2+numNodes)];
468 #endif
469 } // Worker
470
471 // Copy Element data from tempInts to mesh_p
472 mesh_p->ContactElements->allocTable(chunkEle);
473
474 if (noError()) {
475 mesh_p->ContactElements->minColor=0;
476 mesh_p->ContactElements->maxColor=chunkEle-1;
477 #pragma omp parallel for private (i0, i1)
478 for (i0=0; i0<chunkEle; i0++) {
479 mesh_p->ContactElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
480 mesh_p->ContactElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
481 mesh_p->ContactElements->Owner[i0] =mpi_info->rank;
482 mesh_p->ContactElements->Color[i0] = i0;
483 for (i1 = 0; i1 < numNodes; i1++) {
484 mesh_p->ContactElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
485 }
486 }
487 }
488 delete[] tempInts;
489 } // end of Read the contact element data
490
491 // ****************** read nodal elements ******************
492
493 // *************** Read the element typeID ***********
494
495 if (noError()) {
496 if (mpi_info->rank == 0) {
497 scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
498 FSCANF_CHECK(scan_ret, "Mesh_read")
499 typeID=ReferenceElement::getTypeId(element_type);
500 }
501 #ifdef ESYS_MPI
502 if (mpi_info->size > 1) {
503 int temp1[2];
504 temp1[0] = (int) typeID;
505 temp1[1] = numEle;
506 MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
507 typeID = (ElementTypeId) temp1[0];
508 numEle = temp1[1];
509 }
510 #endif
511 if (typeID==NoRef) {
512 sprintf(error_msg, "Mesh::read: Unidentified element type %s", element_type);
513 setError(VALUE_ERROR, error_msg);
514 }
515 }
516
517 if (noError()) {
518 // Allocate the ElementFile
519 refPoints.reset(new ReferenceElementSet(typeID, order, reduced_order));
520 mesh_p->Points=new ElementFile(refPoints, mpi_info);
521 // New meaning for numNodes: num nodes per element
522 numNodes = mesh_p->Points->numNodes;
523 }
524
525 // ******************* Read the nodal element data ****************
526 if (noError()) {
527 int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
528 // Store Id + Tag + node list (+ one int at end for chunkEle)
529 int *tempInts = new int[chunkSize*(2+numNodes)+1];
530 // Elements are specified as a list of integers...only need one
531 // message instead of two as with the nodes
532 if (mpi_info->rank == 0) { // Master
533 for (;;) { // Infinite loop
534 #pragma omp parallel for private (i0) schedule(static)
535 for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
536
537 chunkEle = 0;
538 for (i0=0; i0<chunkSize; i0++) {
539 if (totalEle >= numEle) break; /* End inner loop */
540 scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
541 FSCANF_CHECK(scan_ret, "Mesh_read")
542 for (i1 = 0; i1 < numNodes; i1++) {
543 scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
544 FSCANF_CHECK(scan_ret, "Mesh_read")
545 }
546 scan_ret = fscanf(fileHandle_p, "\n");
547 FSCANF_CHECK(scan_ret, "Mesh_read")
548 totalEle++;
549 chunkEle++;
550 }
551 #ifdef ESYS_MPI
552 // Eventually we'll send a chunk of elements to each CPU
553 // except 0 itself; here goes one of them
554 if (nextCPU < mpi_info->size) {
555 tempInts[chunkSize*(2+numNodes)] = chunkEle;
556 MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81725, mpi_info->comm);
557 }
558 #endif
559 nextCPU++;
560 // Infinite loop ends when I've read a chunk for each of
561 // the worker nodes plus one more chunk for the master
562 if (nextCPU > mpi_info->size)
563 break; // End infinite loop
564 } // Infinite loop
565 } // End master
566 else { // Worker
567 #ifdef ESYS_MPI
568 // Each worker receives one message
569 MPI_Status status;
570 MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81725, mpi_info->comm, &status);
571 chunkEle = tempInts[chunkSize*(2+numNodes)];
572 #endif
573 } // Worker
574
575 // Copy Element data from tempInts to mesh_p
576 mesh_p->Points->allocTable(chunkEle);
577
578 if (noError()) {
579 mesh_p->Points->minColor=0;
580 mesh_p->Points->maxColor=chunkEle-1;
581 #pragma omp parallel for private (i0, i1) schedule(static)
582 for (i0=0; i0<chunkEle; i0++) {
583 mesh_p->Points->Id[i0] = tempInts[i0*(2+numNodes)+0];
584 mesh_p->Points->Tag[i0] = tempInts[i0*(2+numNodes)+1];
585 mesh_p->Points->Owner[i0] =mpi_info->rank;
586 mesh_p->Points->Color[i0] = i0;
587 for (i1 = 0; i1 < numNodes; i1++) {
588 mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
589 }
590 }
591 }
592
593 delete[] tempInts;
594 } // ************** end of Read the nodal element data ***************
595
596 // ***************** get the name tags ********************************
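// Illustrative sketch (editorial addition): the trailing tag section is read
// as a keyword line followed by name/value pairs, one per line, for example
// (assumed sample input):
//
//     Tags
//     top    20
//     bottom 10
//
// Each pair is passed to mesh_p->addTagMap(name, tag_key) below.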
597 if (noError()) {
598 char *remainder=0, *ptr;
599 size_t len=0;
600 #ifdef ESYS_MPI
601 int len_i;
602 #endif
603 int tag_key;
604 if (mpi_info->rank == 0) { // Master
605 // Read the word 'Tag'
606 if (!feof(fileHandle_p)) {
607 scan_ret = fscanf(fileHandle_p, "%s\n", name);
608 FSCANF_CHECK(scan_ret, "Mesh_read")
609 }
610
611 #ifdef _WIN32
612 // windows ftell lies on unix formatted text files
613 remainder = NULL;
614 len=0;
615 const size_t malloc_chunk = 1024;
616 size_t buff_size = 0;
617 int ch;
618 while (1) {
619 ch = fgetc(fileHandle_p);
620 if (ch == '\r')
621 continue;
622
623 if (len+1 > buff_size) {
624 buff_size += malloc_chunk;
625 TMPMEMREALLOC(remainder,remainder,buff_size,char);
626 }
627 if (ch == EOF) {
628 // hit EOF
629 remainder[len] = (char)0;
630 break;
631 }
632 remainder[len] = (char)ch;
633 len++;
634 }
635 #else
636 // Read rest of file in one chunk, after using seek to find length
637 {
638 long cur_pos, end_pos;
639
640 cur_pos = ftell(fileHandle_p);
641 fseek(fileHandle_p, 0L, SEEK_END);
642 end_pos = ftell(fileHandle_p);
643 fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
644 remainder = new char[end_pos-cur_pos+1];
645 if (!feof(fileHandle_p)) {
646 scan_ret = fread(remainder, (size_t) end_pos-cur_pos,
647 sizeof(char), fileHandle_p);
648
649 FSCANF_CHECK(scan_ret, "Mesh_read")
650 remainder[end_pos-cur_pos] = 0;
651 }
652 }
653 #endif
654 len = strlen(remainder);
655 // trim the string
656 while ((len>1) && isspace(remainder[--len])) {remainder[len]=0;}
657 len = strlen(remainder);
658 } // Master
659 #ifdef ESYS_MPI
660 len_i=(int) len;
661 MPI_Bcast(&len_i, 1, MPI_INT, 0, mpi_info->comm);
662 len=(size_t) len_i;
663 if (mpi_info->rank != 0) {
664 remainder = new char[len+1];
665 remainder[0] = 0;
666 }
667 if (MPI_Bcast (remainder, len+1, MPI_CHAR, 0, mpi_info->comm) !=
668 MPI_SUCCESS)
669 setError(ESYS_MPI_ERROR, "Mesh_read: broadcast of remainder failed");
670 #endif
671
672 if (remainder[0]) {
673 ptr = remainder;
674 do {
675 sscanf(ptr, "%s %d\n", name, &tag_key);
676 if (*name)
677 mesh_p->addTagMap(name, tag_key);
678 ptr++;
679 } while(NULL != (ptr = strchr(ptr, '\n')) && *ptr);
680 }
681 if (remainder)
682 delete[] remainder;
683 }
684
685 // close file
686 if (mpi_info->rank == 0)
687 fclose(fileHandle_p);
688
689 // resolve id's and rearrange elements
690 if (noError()) mesh_p->resolveNodeIds();
691 if (noError()) mesh_p->prepare(optimize);
692
693 // that's it
694 if (!noError()) {
695 delete mesh_p;
696 mesh_p=NULL;
697 }
698 Esys_MPIInfo_free(mpi_info);
699 return mesh_p;
700 }
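// Usage sketch (editorial addition, hypothetical caller; the file name and
// argument values are placeholders): Mesh::read() returns a raw Mesh pointer,
// or NULL if an error was flagged:
//
//     finley::Mesh* mesh = finley::Mesh::read("domain.fly", /*order*/1,
//                                             /*reduced_order*/1,
//                                             /*optimize*/true);
//     if (mesh != NULL) {
//         // ... use the mesh ...
//         delete mesh;
//     }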
701
702 } // namespace finley
703
