Contents of /trunk/finley/src/Mesh_read.c

Revision 2881 - Thu Jan 28 02:03:15 2010 UTC by jfenwick
File MIME type: text/plain
File size: 26802 byte(s)

Don't panic.
Updating copyright stamps


/*******************************************************
 *
 * Copyright (c) 2003-2010 by University of Queensland
 * Earth Systems Science Computational Center (ESSCC)
 * http://www.uq.edu.au/esscc
 *
 * Primary Business: Queensland, Australia
 * Licensed under the Open Software License version 3.0
 * http://www.opensource.org/licenses/osl-3.0.php
 *
 *******************************************************/


/**************************************************************/

/* Finley: read mesh */

/**************************************************************/

#include <ctype.h>
#include "Mesh.h"

#define FSCANF_CHECK(scan_ret, reason) { if (scan_ret == EOF) { perror(reason); Finley_setError(IO_ERROR,"scan error while reading finley file"); return NULL;} }

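/*
 * Expected file layout, as implied by the fscanf patterns below (a sketch
 * reconstructed from the read calls, not a formal specification):
 *
 *   <mesh name>                          -- one header line
 *   <numDim>D <numNodes>                 -- e.g. "2D 1000"
 *   <Id> <gDOF> <Tag> <x> [<y> [<z>]]    -- one line per node, numDim coordinates
 *   <elementType> <numEle>               -- then one line per element:
 *   <Id> <Tag> <n0> ... <nk>             -- k+1 = nodes per element
 *   ... (the element block is repeated for face, contact and point elements)
 *   <keyword>                            -- optional tag map: <name> <tag_key> per line
 *
 * Note that FSCANF_CHECK returns NULL on a scan error without closing the
 * file or freeing the temporary buffers allocated below, so an I/O error
 * mid-read leaks those resources.
 */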
Finley_Mesh* Finley_Mesh_read(char* fname, index_t order, index_t reduced_order, bool_t optimize)
{
    Paso_MPIInfo *mpi_info = NULL;
    dim_t numNodes, numDim, numEle, i0, i1;
    Finley_Mesh *mesh_p = NULL;
    Finley_ReferenceElementSet *refPoints = NULL, *refContactElements = NULL, *refFaceElements = NULL, *refElements = NULL;
    char name[LenString_MAX], element_type[LenString_MAX], frm[20];
    char error_msg[LenErrorMsg_MAX];
    FILE *fileHandle_p = NULL;
    ElementTypeId typeID = NoRef;
    int scan_ret;

    Finley_resetError();
    mpi_info = Paso_MPIInfo_alloc(MPI_COMM_WORLD);

    if (mpi_info->rank == 0) {
        /* get file handle */
        fileHandle_p = fopen(fname, "r");
        if (fileHandle_p == NULL) {
            sprintf(error_msg, "Finley_Mesh_read: Opening file %s for reading failed.", fname);
            Finley_setError(IO_ERROR, error_msg);
            Paso_MPIInfo_free(mpi_info);
            return NULL;
        }

        /* read header */
        sprintf(frm, "%%%d[^\n]", LenString_MAX-1);
        scan_ret = fscanf(fileHandle_p, frm, name);
        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")

        /* get the spatial dimension and the number of nodes; "%1d" reads the
           dimension as a single digit (so "2D 1000" yields numDim=2,
           numNodes=1000) and "%*s" discards the rest of the "2D" token */
        scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim, &numNodes);
        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
    }

#ifdef PASO_MPI
    /* broadcast numDim, numNodes and name if there are multiple MPI processes */
    if (mpi_info->size > 1) {
        int temp1[3];
        if (mpi_info->rank == 0) {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = strlen(name) + 1;
        } else {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast(temp1, 3, MPI_INT, 0, mpi_info->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        MPI_Bcast(name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    }
#endif
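    /* The string length travels in the same broadcast as numDim/numNodes so
       that every rank can pass the matching count to the MPI_CHAR broadcast
       of the mesh name above; MPI requires the counts to agree on all ranks. */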

    /* allocate mesh */
    mesh_p = Finley_Mesh_alloc(name, numDim, mpi_info);

    if (Finley_noError()) {
        /* Each CPU will get at most chunkSize nodes, so the message has to be sufficiently large */
        int chunkSize = numNodes / mpi_info->size + 1, totalNodes = 0, chunkNodes = 0, nextCPU = 1;
        int *tempInts = TMPMEMALLOC(chunkSize*3+1, index_t);        /* Stores the integer message data */
        double *tempCoords = TMPMEMALLOC(chunkSize*numDim, double); /* Stores the double message data */

        /*
           Read chunkSize nodes at a time and send each chunk to a worker CPU,
           which copies it into its local mesh_p. It does not matter that a CPU
           ends up with the wrong nodes for its elements; this is sorted out later.
           The first chunk goes to CPU 1, the second to CPU 2, and so on; the
           last chunk stays on CPU 0 (the master). The three columns of integers
           (Id, gDOF, Tag) are gathered into the single array tempInts and sent
           together in one MPI message.
        */

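        /* Layout of the two message buffers, as filled in by the code below:

             tempInts:   [ Id[0..chunkSize-1] | gDOF[0..chunkSize-1] | Tag[0..chunkSize-1] | chunkNodes ]
             tempCoords: [ x0 y0 z0  x1 y1 z1  ... ]  (numDim coordinates per node, node-major)

           Unused slots keep the sentinel value -1 set at the top of each pass. */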
        if (mpi_info->rank == 0) { /* Master */
            for (;;) { /* Infinite loop */
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*3+1; i0++) tempInts[i0] = -1;

#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*numDim; i0++) tempCoords[i0] = -1.0;

                chunkNodes = 0;
                for (i1=0; i1<chunkSize; i1++) {
                    if (totalNodes >= numNodes) break; /* End of inner loop */
                    if (1 == numDim) {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
                                          &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                                          &tempCoords[i1*numDim+0]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    if (2 == numDim) {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
                                          &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                                          &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    if (3 == numDim) {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                                          &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                                          &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1], &tempCoords[i1*numDim+2]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    totalNodes++; /* nodes read so far; the inner loop stops once all numNodes have been read */
                    chunkNodes++; /* nodes actually in this chunk; the last chunk may hold fewer than chunkSize */
                }
                if (chunkNodes > chunkSize) {
                    /* cannot happen with the loop bound above; kept as a defensive check */
                    Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: error reading chunks of mesh, data too large for message size");
                    return NULL;
                }
#ifdef PASO_MPI
                /* Eventually a chunk will have been sent to each CPU numbered 1 ... mpi_info->size-1; here goes one of them.
                   Tags 81720/81721 mark the node messages; the element phases below use 81722-81725. */
                if (nextCPU < mpi_info->size) {
                    tempInts[chunkSize*3] = chunkNodes; /* the message carries one extra int: chunkNodes */
                    MPI_Send(tempInts, chunkSize*3+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
                    MPI_Send(tempCoords, chunkSize*numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* The infinite loop ends once a chunk has been read for every worker plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break; /* End infinite loop */
            } /* Infinite loop */
        } /* End master */
        else { /* Worker */
#ifdef PASO_MPI
            /* Each worker receives two messages */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*3+1, MPI_INT, 0, 81720, mpi_info->comm, &status);
            MPI_Recv(tempCoords, chunkSize*numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
            chunkNodes = tempInts[chunkSize*3]; /* How many nodes are in this worker's chunk? */
#endif
        } /* Worker */

        /* Copy node data from the temporary arrays into mesh_p */
        Finley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);

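        /* The loop below transposes the node-major tempCoords into the
           NodeFile coordinate layout via INDEX2; assuming finley's usual
           definition INDEX2(i,j,n) = i + n*j, the numDim components of node
           i0 end up stored contiguously. */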
        if (Finley_noError()) {
#pragma omp parallel for private (i0, i1) schedule(static)
            for (i0=0; i0<chunkNodes; i0++) {
                mesh_p->Nodes->Id[i0] = tempInts[0+i0];
                mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize+i0];
                mesh_p->Nodes->Tag[i0] = tempInts[chunkSize*2+i0];
                for (i1=0; i1<numDim; i1++) {
                    mesh_p->Nodes->Coordinates[INDEX2(i1,i0,numDim)] = tempCoords[i0*numDim+i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
        TMPMEMFREE(tempCoords);
    }
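    /* At this point every rank holds an essentially arbitrary slice of the
       nodes; matching nodes to the elements that reference them is deferred
       to Finley_Mesh_resolveNodeIds() and Finley_Mesh_prepare() at the end
       of this function. */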

    /* ************************************ read elements ************************************* */
    if (Finley_noError()) {

        /* Read the element typeID */
        if (mpi_info->rank == 0) {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
            typeID = Finley_ReferenceElement_getTypeId(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1) {
            int temp1[2], mpi_error;
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            mpi_error = MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            if (mpi_error != MPI_SUCCESS) {
                Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of Element typeID failed");
                return NULL;
            }
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef) {
            sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
            Finley_setError(VALUE_ERROR, error_msg);
        }
    }

    /* Allocate the ElementFile */
    if (Finley_noError()) {
        refElements = Finley_ReferenceElementSet_alloc(typeID, order, reduced_order);
        mesh_p->Elements = Finley_ElementFile_alloc(refElements, mpi_info);
        numNodes = mesh_p->Elements->referenceElementSet->numNodes; /* New meaning for numNodes: number of nodes per element */
    }

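    /* Note: from here on numNodes no longer holds the global node count; each
       section below resets it to the nodes-per-element of the block it reads. */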
    /* ***************************** Read the element data ************************************ */
    if (Finley_noError()) {

        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Stores Id + Tag + node list (+ one int at the end for chunkEle) */
        /* Elements are specified entirely by integers, so one message suffices instead of the two needed for the nodes */
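        /* Per-element record in tempInts, as filled by the reads below:

             [ Id | Tag | n0 ... n_{numNodes-1} ]  repeated chunkSize times, then chunkEle.

           The same scheme is reused for the face, contact and point elements. */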
        if (mpi_info->rank == 0) { /* Master */
            for (;;) { /* Infinite loop */
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++) {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++) {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually a chunk of elements will have been sent to every CPU except 0 itself; here goes one of them */
                if (nextCPU < mpi_info->size) {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81722, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* The infinite loop ends once a chunk has been read for every worker plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break; /* End infinite loop */
            } /* Infinite loop */
        } /* End master */
        else { /* Worker */
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81722, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */

        Finley_ElementFile_allocTable(mesh_p->Elements, chunkEle);

        /* Copy element data from tempInts into mesh_p */
        if (Finley_noError()) {

            mesh_p->Elements->minColor = 0;
            mesh_p->Elements->maxColor = chunkEle-1;
#pragma omp parallel for private (i0, i1) schedule(static)
            for (i0=0; i0<chunkEle; i0++) {
                mesh_p->Elements->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->Elements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->Elements->Owner[i0] = mpi_info->rank;
                mesh_p->Elements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++) {
                    mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }
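    /* Each element starts out owned by the local rank with Color equal to its
       chunk index; coloring and ownership are presumably revised during
       Finley_Mesh_prepare() once the full distribution is known. */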
    /* ***************************** end of Read the element data ***************************** */

    /* ************************************ read face elements ******************************** */
    if (Finley_noError()) {
        /* Read the element typeID */
        if (mpi_info->rank == 0) {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
            typeID = Finley_ReferenceElement_getTypeId(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1) {
            int temp1[2];
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef) {
            sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
            Finley_setError(VALUE_ERROR, error_msg);
        }
        if (Finley_noError()) {
            /* Allocate the ElementFile */
            refFaceElements = Finley_ReferenceElementSet_alloc(typeID, order, reduced_order);
            mesh_p->FaceElements = Finley_ElementFile_alloc(refFaceElements, mpi_info);
            numNodes = mesh_p->FaceElements->referenceElementSet->numNodes; /* New meaning for numNodes: number of nodes per element */
        }
    }

    /* ***************************** Read the face element data ******************************* */
    if (Finley_noError()) {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Stores Id + Tag + node list (+ one int at the end for chunkEle) */
        /* Elements are specified entirely by integers, so one message suffices instead of the two needed for the nodes */
        if (mpi_info->rank == 0) { /* Master */
            for (;;) { /* Infinite loop */
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++) {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++) {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually a chunk of elements will have been sent to every CPU except 0 itself; here goes one of them */
                if (nextCPU < mpi_info->size) {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81723, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* The infinite loop ends once a chunk has been read for every worker plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break; /* End infinite loop */
            } /* Infinite loop */
        } /* End master */
        else { /* Worker */
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81723, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */

        Finley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);

        if (Finley_noError()) {
            /* Copy element data from tempInts into mesh_p */
            mesh_p->FaceElements->minColor = 0;
            mesh_p->FaceElements->maxColor = chunkEle-1;
#pragma omp parallel for private (i0, i1)
            for (i0=0; i0<chunkEle; i0++) {
                mesh_p->FaceElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->FaceElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->FaceElements->Owner[i0] = mpi_info->rank;
                mesh_p->FaceElements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++) {
                    mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }
    /* *************************** end of Read the face element data ************************** */


    /* ********************************** read contact elements ******************************* */

    /* Read the element typeID */
    if (Finley_noError()) {
        if (mpi_info->rank == 0) {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
            typeID = Finley_ReferenceElement_getTypeId(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1) {
            int temp1[2];
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef) {
            sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
            Finley_setError(VALUE_ERROR, error_msg);
        }
    }

    if (Finley_noError()) {
        /* Allocate the ElementFile */
        refContactElements = Finley_ReferenceElementSet_alloc(typeID, order, reduced_order);
        mesh_p->ContactElements = Finley_ElementFile_alloc(refContactElements, mpi_info);
        numNodes = mesh_p->ContactElements->referenceElementSet->numNodes; /* New meaning for numNodes: number of nodes per element */
    }
    /* *************************** Read the contact element data ****************************** */
    if (Finley_noError()) {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Stores Id + Tag + node list (+ one int at the end for chunkEle) */
        /* Elements are specified entirely by integers, so one message suffices instead of the two needed for the nodes */
        if (mpi_info->rank == 0) { /* Master */
            for (;;) { /* Infinite loop */
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++) {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++) {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually a chunk of elements will have been sent to every CPU except 0 itself; here goes one of them */
                if (nextCPU < mpi_info->size) {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81724, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* The infinite loop ends once a chunk has been read for every worker plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break; /* End infinite loop */
            } /* Infinite loop */
        } /* End master */
        else { /* Worker */
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81724, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */

        /* Copy element data from tempInts into mesh_p */
        Finley_ElementFile_allocTable(mesh_p->ContactElements, chunkEle);

        if (Finley_noError()) {
            mesh_p->ContactElements->minColor = 0;
            mesh_p->ContactElements->maxColor = chunkEle-1;
#pragma omp parallel for private (i0, i1)
            for (i0=0; i0<chunkEle; i0++) {
                mesh_p->ContactElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->ContactElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->ContactElements->Owner[i0] = mpi_info->rank;
                mesh_p->ContactElements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++) {
                    mesh_p->ContactElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
    } /* end of Read the contact element data */

    /* *********************************** read nodal elements ******************************** */

    /* Read the element typeID */
    if (Finley_noError()) {
        if (mpi_info->rank == 0) {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
            typeID = Finley_ReferenceElement_getTypeId(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1) {
            int temp1[2];
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef) {
            sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
            Finley_setError(VALUE_ERROR, error_msg);
        }
    }

    if (Finley_noError()) {
        /* Allocate the ElementFile */
        refPoints = Finley_ReferenceElementSet_alloc(typeID, order, reduced_order);
        mesh_p->Points = Finley_ElementFile_alloc(refPoints, mpi_info);
        numNodes = mesh_p->Points->referenceElementSet->numNodes; /* New meaning for numNodes: number of nodes per element */
    }

    /* **************************** Read the nodal element data ******************************* */
    if (Finley_noError()) {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Stores Id + Tag + node list (+ one int at the end for chunkEle) */
        /* Elements are specified entirely by integers, so one message suffices instead of the two needed for the nodes */
        if (mpi_info->rank == 0) { /* Master */
            for (;;) { /* Infinite loop */
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++) {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++) {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually a chunk of elements will have been sent to every CPU except 0 itself; here goes one of them */
                if (nextCPU < mpi_info->size) {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81725, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* The infinite loop ends once a chunk has been read for every worker plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break; /* End infinite loop */
            } /* Infinite loop */
        } /* End master */
        else { /* Worker */
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81725, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */

        /* Copy element data from tempInts into mesh_p */
        Finley_ElementFile_allocTable(mesh_p->Points, chunkEle);

        if (Finley_noError()) {
            mesh_p->Points->minColor = 0;
            mesh_p->Points->maxColor = chunkEle-1;
#pragma omp parallel for private (i0, i1) schedule(static)
            for (i0=0; i0<chunkEle; i0++) {
                mesh_p->Points->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->Points->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->Points->Owner[i0] = mpi_info->rank;
                mesh_p->Points->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++) {
                    mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    } /* end of Read the nodal element data */


    /* ************************************ get the name tags ********************************* */
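    /* The remainder of the file is an optional tag map: a keyword line (read
       into `name` below), then one "<name> <tag_key>" pair per line. An
       illustrative block (hypothetical names and values):

             Tags
             top_boundary    1
             bottom_boundary 2
    */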
    if (Finley_noError()) {
        char *remainder = 0, *ptr;
        size_t len = 0;
#ifdef PASO_MPI
        int len_i;
#endif
        int tag_key;
        if (mpi_info->rank == 0) { /* Master */
            /* Read the word 'Tag' */
            if (! feof(fileHandle_p)) {
                scan_ret = fscanf(fileHandle_p, "%s\n", name);
                FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
            }

#if defined(_WIN32) /* windows ftell lies on unix formatted text files */
            /* Read the rest of the file character by character, skipping '\r'
               and growing the buffer in 1024-byte steps. The buffer size must
               persist across iterations: a per-iteration buff_size would reset
               to zero each pass and cap the buffer at a single chunk. */
            {
                size_t malloc_chunk = 1024;
                size_t buff_size = 0;
                int ch;

                remainder = NULL;
                len = 0;
                while (1)
                {
                    ch = fgetc(fileHandle_p);
                    if (ch == '\r')
                    {
                        continue;
                    }
                    if (len+1 > buff_size)
                    {
                        TMPMEMREALLOC(remainder, remainder, buff_size+malloc_chunk, char);
                        buff_size += malloc_chunk;
                    }
                    if (ch == EOF)
                    {
                        /* hit EOF */
                        remainder[len] = (char)0;
                        break;
                    }
                    remainder[len] = (char)ch;
                    len++;
                }
            }
#else
            /* Read the rest of the file in one chunk, after using seek to find its length */
            {
                long cur_pos, end_pos;

                cur_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, 0L, SEEK_END);
                end_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
                remainder = TMPMEMALLOC(end_pos-cur_pos+1, char);
                if (! feof(fileHandle_p))
                {
                    /* fread returns the number of items read and never EOF, so
                       check the count directly instead of reusing FSCANF_CHECK */
                    scan_ret = fread(remainder, sizeof(char), (size_t)(end_pos-cur_pos), fileHandle_p);
                    if (scan_ret != end_pos-cur_pos) {
                        Finley_setError(IO_ERROR, "Finley_Mesh_read: read error while reading tag map");
                        return NULL;
                    }
                    remainder[end_pos-cur_pos] = 0;
                }
            }
#endif
            /* trim trailing whitespace and shrink the buffer to fit */
            len = strlen(remainder);
            while ((len > 1) && isspace(remainder[--len])) { remainder[len] = 0; }
            len = strlen(remainder);
            TMPMEMREALLOC(remainder, remainder, len+1, char);
        } /* Master */
#ifdef PASO_MPI
        len_i = (int) len;
        MPI_Bcast(&len_i, 1, MPI_INT, 0, mpi_info->comm);
        len = (size_t) len_i;
        if (mpi_info->rank != 0) {
            remainder = TMPMEMALLOC(len+1, char);
            remainder[0] = 0;
        }
        if (MPI_Bcast(remainder, len+1, MPI_CHAR, 0, mpi_info->comm) != MPI_SUCCESS)
            Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of remainder failed");
#endif

        /* parse lines of the form "<name> <tag_key>" and register each tag */
        if (remainder[0]) {
            ptr = remainder;
            do {
                sscanf(ptr, "%s %d\n", name, &tag_key);
                if (*name) Finley_Mesh_addTagMap(mesh_p, name, tag_key);
                ptr++;
            } while (NULL != (ptr = strchr(ptr, '\n')) && *ptr);
        }
        if (remainder)
            TMPMEMFREE(remainder);
    }

    /* close file */
    if (mpi_info->rank == 0) fclose(fileHandle_p);

    /* resolve node ids and rearrange elements */
    if (Finley_noError()) Finley_Mesh_resolveNodeIds(mesh_p);
    if (Finley_noError()) Finley_Mesh_prepare(mesh_p, optimize);

    /* that's it */
    if (! Finley_noError()) {
        Finley_Mesh_free(mesh_p);
        mesh_p = NULL; /* do not hand a freed mesh back to the caller */
    }
    /* free up memory */
    Finley_ReferenceElementSet_dealloc(refPoints);
    Finley_ReferenceElementSet_dealloc(refContactElements);
    Finley_ReferenceElementSet_dealloc(refFaceElements);
    Finley_ReferenceElementSet_dealloc(refElements);
    Paso_MPIInfo_free(mpi_info);
    return mesh_p;
}
