/[escript]/branches/domexper/dudley/src/Mesh_read.c
Revision 3221 - Wed Sep 29 01:00:21 2010 UTC by jfenwick
Log message: Comment stripping


/*******************************************************
*
* Copyright (c) 2003-2010 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/


/**************************************************************/

/* Dudley: read mesh */

/**************************************************************/

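/* A sketch of the file layout this reader expects, inferred from the fscanf
   patterns below. All names, counts and values are illustrative only:

       my_mesh                     <- header line: mesh name
       2D-Nodes 4                  <- numDim (first digit), then numNodes
       0 0 0  0.0 0.0              <- per node: Id gDOF Tag x [y [z]]
       ...
       Tri3 2                      <- interior elements: type name, numEle
       0 1  0 1 2                  <- per element: Id Tag node-list
       ...
       Line2 1                     <- face elements, same per-line layout
       Point1 0                    <- nodal elements ("points"), same layout
       Tags                        <- optional tag table: name/key pairs
       bottom 1
*/
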
#include <ctype.h>
#include "Mesh.h"

#define FSCANF_CHECK(scan_ret, reason) { if (scan_ret == EOF) { perror(reason); Dudley_setError(IO_ERROR,"scan error while reading dudley file"); return NULL;} }
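
/* Note: FSCANF_CHECK only traps EOF. A malformed token makes fscanf return a
   short match count rather than EOF, which this macro does not detect, and
   the early return leaves fileHandle_p open on rank 0. */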

Dudley_Mesh* Dudley_Mesh_read(char* fname, index_t order, index_t reduced_order, bool_t optimize)
{
    Paso_MPIInfo *mpi_info = NULL;
    dim_t numNodes, numDim, numEle, i0, i1;
    Dudley_Mesh *mesh_p = NULL;
    char name[LenString_MAX], element_type[LenString_MAX], frm[20];
    char error_msg[LenErrorMsg_MAX];
    FILE *fileHandle_p = NULL;
    ElementTypeId typeID = NoRef;
    int scan_ret;

    Dudley_resetError();
    mpi_info = Paso_MPIInfo_alloc(MPI_COMM_WORLD);

    if (mpi_info->rank == 0)
    {
        /* get file handle */
        fileHandle_p = fopen(fname, "r");
        if (fileHandle_p == NULL)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Opening file %s for reading failed.", fname);
            Dudley_setError(IO_ERROR, error_msg);
            Paso_MPIInfo_free(mpi_info);
            return NULL;
        }

        /* read header */
        sprintf(frm, "%%%d[^\n]", LenString_MAX - 1);
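        /* frm expands to a bounded line read, e.g. "%63[^\n]" if LenString_MAX
           were 64 (illustrative value), so the mesh name cannot overflow name[] */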
        scan_ret = fscanf(fileHandle_p, frm, name);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")

        /* get the number of nodes */
        scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim, &numNodes);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
    }

#ifdef PASO_MPI
    /* broadcast numDim, numNodes and name if there are multiple MPI processes */
    if (mpi_info->size > 1)
    {
        int temp1[3];
        if (mpi_info->rank == 0)
        {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = strlen(name) + 1;
        }
        else
        {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast(temp1, 3, MPI_INT, 0, mpi_info->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        MPI_Bcast(name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    }
#endif

    /* allocate mesh */
    mesh_p = Dudley_Mesh_alloc(name, numDim, mpi_info);

    if (Dudley_noError())
    {
        /* Each CPU gets at most chunkSize nodes, so the message buffers must be that large */
        int chunkSize = numNodes / mpi_info->size + 1, totalNodes = 0, chunkNodes = 0, nextCPU = 1;
        int *tempInts = TMPMEMALLOC(chunkSize * 3 + 1, index_t);        /* stores the integer message data */
        double *tempCoords = TMPMEMALLOC(chunkSize * numDim, double);   /* stores the coordinate message data */

        /*
           Read chunkSize nodes at a time and send each chunk to a worker CPU,
           which copies it into its local mesh_p. It does not matter that a
           CPU may hold the wrong nodes for its elements; this is sorted out
           later. The first chunk goes to CPU 1, the second to CPU 2, and so
           on; the last chunk stays on CPU 0 (the master). The three columns
           of integers (Id, gDOF, Tag) are packed into the single array
           tempInts and sent together in one MPI message.
         */
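
        /*
           Worked example with illustrative numbers: for numNodes = 10 on
           mpi_info->size = 3 ranks, chunkSize = 10/3 + 1 = 4, so CPU 1
           receives nodes 0..3, CPU 2 receives nodes 4..7, and the master
           keeps nodes 8..9 (chunkNodes = 2 in the final chunk). tempInts is
           laid out as three blocks of chunkSize entries, [Id | gDOF | Tag],
           followed by one trailing int that carries chunkNodes.
         */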

        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * 3 + 1; i0++)
                    tempInts[i0] = -1;

                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * numDim; i0++)
                    tempCoords[i0] = -1.0;

                chunkNodes = 0;
                for (i1 = 0; i1 < chunkSize; i1++)
                {
                    if (totalNodes >= numNodes)
                        break;      /* end of inner loop */
                    if (1 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (2 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0], &tempCoords[i1 * numDim + 1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (3 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0], &tempCoords[i1 * numDim + 1], &tempCoords[i1 * numDim + 2]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    totalNodes++;   /* running total across all chunks; ends the outer loop via numNodes */
                    chunkNodes++;   /* nodes actually in this chunk; the last chunk may hold fewer than chunkSize */
                }
                if (chunkNodes > chunkSize)
                {
                    Dudley_setError(PASO_MPI_ERROR, "Dudley_Mesh_read: error reading chunks of mesh, data too large for message size");
                    return NULL;
                }
#ifdef PASO_MPI
                /* send this chunk to the next CPU in line; CPUs 1 .. mpi_info->size-1 each get one */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * 3] = chunkNodes;   /* the extra trailing int carries chunkNodes */
                    MPI_Send(tempInts, chunkSize * 3 + 1, MPI_INT, nextCPU, 81720, mpi_info->comm);
                    MPI_Send(tempCoords, chunkSize * numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* the infinite loop ends once a chunk has been read for every worker plus one more for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* end infinite loop */
            }                       /* infinite loop */
        }                           /* end master */
        else                        /* Worker */
        {
#ifdef PASO_MPI
            /* each worker receives two messages */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * 3 + 1, MPI_INT, 0, 81720, mpi_info->comm, &status);
            MPI_Recv(tempCoords, chunkSize * numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
            chunkNodes = tempInts[chunkSize * 3];   /* number of nodes in this worker's chunk */
#endif
        }                           /* worker */

        /* copy node data from the temporary arrays into mesh_p */
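        /* tempCoords is node-major: the numDim coordinates of node i0 are
           contiguous at tempCoords[i0*numDim]. Nodes->Coordinates is
           addressed through INDEX2(i1, i0, numDim), which under the usual
           escript convention (assumed here) is i1 + numDim*i0, i.e. the same
           layout, so the loop below is a straight element-by-element copy. */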
        Dudley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);
        if (Dudley_noError())
        {
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkNodes; i0++)
            {
                mesh_p->Nodes->Id[i0] = tempInts[0 + i0];
                mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize + i0];
                mesh_p->Nodes->Tag[i0] = tempInts[chunkSize * 2 + i0];
                for (i1 = 0; i1 < numDim; i1++)
                {
                    mesh_p->Nodes->Coordinates[INDEX2(i1, i0, numDim)] = tempCoords[i0 * numDim + i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
        TMPMEMFREE(tempCoords);
    }

    /* ************************************* read elements ************************************* */
    if (Dudley_noError())
    {
        /* read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2], mpi_error;
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            mpi_error = MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            if (mpi_error != MPI_SUCCESS)
            {
                Dudley_setError(PASO_MPI_ERROR, "Dudley_Mesh_read: broadcast of element typeID failed");
                return NULL;
            }
            typeID = (ElementTypeId)temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }

    /* allocate the ElementFile */
    if (Dudley_noError())
    {
        mesh_p->Elements = Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Elements->numNodes;  /* numNodes now means nodes per element */
    }

    /* ********************************* read the element data ********************************* */
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize * (2 + numNodes) + 1, index_t);   /* Id + Tag + node list (+ one trailing int for chunkEle) */
        /* elements are pure integer data, so one message suffices (the nodes above needed two) */
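        /*
           tempInts layout, one row per element (illustrative for numNodes = 3):
           element i0 occupies [ Id, Tag, n0, n1, n2 ] starting at offset
           i0*(2+numNodes); tempInts[chunkSize*(2+numNodes)] carries chunkEle.
         */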
        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;      /* end inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0], &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* send this chunk of elements to the next CPU in line; every CPU except 0 gets one */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81722, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* the infinite loop ends once a chunk has been read for every worker plus one more for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* end infinite loop */
            }                       /* infinite loop */
        }                           /* end master */
        else                        /* Worker */
        {
#ifdef PASO_MPI
            /* each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81722, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                           /* worker */
        Dudley_ElementFile_allocTable(mesh_p->Elements, chunkEle);

        /* copy element data from tempInts into mesh_p */
        if (Dudley_noError())
        {
            mesh_p->Elements->minColor = 0;
            mesh_p->Elements->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->Elements->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->Elements->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->Elements->Owner[i0] = mpi_info->rank;
                mesh_p->Elements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
    }
    /* ************************** end of read the element data ********************************* */

    /* ************************************ read face elements ********************************* */
    if (Dudley_noError())
    {
        /* read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId)temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
        if (Dudley_noError())
        {
            /* allocate the ElementFile */
            mesh_p->FaceElements = Dudley_ElementFile_alloc(typeID, mpi_info);
            numNodes = mesh_p->FaceElements->numNodes;  /* numNodes now means nodes per face element */
        }
    }
    /* ******************************* read the face element data ****************************** */

    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize * (2 + numNodes) + 1, index_t);   /* Id + Tag + node list (+ one trailing int for chunkEle) */
        /* elements are pure integer data, so one message suffices; the layout matches the interior elements above */
        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;      /* end inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0], &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* send this chunk of face elements to the next CPU in line; every CPU except 0 gets one */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81723, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* the infinite loop ends once a chunk has been read for every worker plus one more for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* end infinite loop */
            }                       /* infinite loop */
        }                           /* end master */
        else                        /* Worker */
        {
#ifdef PASO_MPI
            /* each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81723, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                           /* worker */
        Dudley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);
        if (Dudley_noError())
        {
            /* copy face element data from tempInts into mesh_p */
            mesh_p->FaceElements->minColor = 0;
            mesh_p->FaceElements->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->FaceElements->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->FaceElements->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->FaceElements->Owner[i0] = mpi_info->rank;
                mesh_p->FaceElements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }
    /* *************************** end of read the face element data *************************** */

    /* ********************************** read nodal elements ********************************** */
    /* read the element typeID */
    if (Dudley_noError())
    {
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId)temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }
    if (Dudley_noError())
    {
        /* allocate the ElementFile */
        mesh_p->Points = Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Points->numNodes;    /* numNodes now means nodes per point element */
    }
    /* ****************************** read the nodal element data ****************************** */
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize * (2 + numNodes) + 1, index_t);   /* Id + Tag + node list (+ one trailing int for chunkEle) */
        /* elements are pure integer data, so one message suffices; the layout matches the interior elements above */
        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;      /* end inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0], &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* send this chunk of nodal elements to the next CPU in line; every CPU except 0 gets one */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81725, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* the infinite loop ends once a chunk has been read for every worker plus one more for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* end infinite loop */
            }                       /* infinite loop */
        }                           /* end master */
        else                        /* Worker */
        {
#ifdef PASO_MPI
            /* each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81725, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                           /* worker */

        /* copy nodal element data from tempInts into mesh_p */
        Dudley_ElementFile_allocTable(mesh_p->Points, chunkEle);
        if (Dudley_noError())
        {
            mesh_p->Points->minColor = 0;
            mesh_p->Points->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->Points->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->Points->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->Points->Owner[i0] = mpi_info->rank;
                mesh_p->Points->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }                               /* end of read the nodal element data */

    /* ************************************ get the name tags ********************************** */
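    /*
       The rest of the file is an optional tag table: a keyword line followed
       by name/key pairs, e.g. (names and keys illustrative)

           Tags
           bottom 1
           top 2

       Rank 0 reads the remainder into a single string, broadcasts it, and
       every rank then parses it line by line.
     */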
    if (Dudley_noError())
    {
        char *remainder = 0, *ptr;
        size_t len = 0;
#ifdef PASO_MPI
        int len_i;
#endif
        int tag_key;
        if (mpi_info->rank == 0)    /* Master */
        {
            /* read the keyword heading the tag table */
            if (!feof(fileHandle_p))
            {
                scan_ret = fscanf(fileHandle_p, "%s\n", name);
                FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            }
#if defined(_WIN32)                 /* windows ftell lies on unix formatted text files */
            {
                const size_t malloc_chunk = 1024;
                size_t buff_size = 0;
                int ch;
                remainder = NULL;
                len = 0;
                while (1)
                {
                    ch = fgetc(fileHandle_p);
                    if (ch == '\r')
                        continue;   /* drop carriage returns */
                    if (len + 1 > buff_size)
                    {
                        /* grow the buffer in malloc_chunk steps; buff_size must
                           persist across iterations for the buffer to grow */
                        buff_size += malloc_chunk;
                        TMPMEMREALLOC(remainder, remainder, buff_size, char);
                    }
                    if (ch == EOF)
                    {
                        remainder[len] = (char)0;   /* hit EOF: terminate the string */
                        break;
                    }
                    remainder[len] = (char)ch;
                    len++;
                }
            }
#else
            /* read the rest of the file in one chunk, after seeking to find its length */
            {
                long cur_pos, end_pos;
                cur_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, 0L, SEEK_END);
                end_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
                remainder = TMPMEMALLOC(end_pos - cur_pos + 1, char);
                if (!feof(fileHandle_p))
                {
                    /* fread returns a count, never EOF, so FSCANF_CHECK cannot
                       detect a short read here; check the count directly */
                    size_t n_read = fread(remainder, sizeof(char), (size_t)(end_pos - cur_pos), fileHandle_p);
                    if (n_read != (size_t)(end_pos - cur_pos))
                    {
                        perror("Dudley_Mesh_read");
                        Dudley_setError(IO_ERROR, "read error while reading dudley file");
                        return NULL;
                    }
                    remainder[n_read] = 0;
                }
            }
#endif
            len = strlen(remainder);
            /* trim trailing whitespace; cast so isspace never sees a negative char */
            while ((len > 1) && isspace((unsigned char)remainder[--len]))
            {
                remainder[len] = 0;
            }
            len = strlen(remainder);
            TMPMEMREALLOC(remainder, remainder, len + 1, char);
        }                           /* master */
#ifdef PASO_MPI
        len_i = (int)len;
        MPI_Bcast(&len_i, 1, MPI_INT, 0, mpi_info->comm);
        len = (size_t)len_i;
        if (mpi_info->rank != 0)
        {
            remainder = TMPMEMALLOC(len + 1, char);
            remainder[0] = 0;
        }
        if (MPI_Bcast(remainder, len + 1, MPI_CHAR, 0, mpi_info->comm) != MPI_SUCCESS)
            Dudley_setError(PASO_MPI_ERROR, "Dudley_Mesh_read: broadcast of remainder failed");
#endif

        if (remainder[0])
        {
            ptr = remainder;
            do
            {
                /* a line only yields a mapping when both a name and a numeric key parse */
                if (sscanf(ptr, "%s %d", name, &tag_key) == 2)
                    Dudley_Mesh_addTagMap(mesh_p, name, tag_key);
                ptr++;
            } while (NULL != (ptr = strchr(ptr, '\n')) && *ptr);
        }
        if (remainder)
            TMPMEMFREE(remainder);
    }

    /* close file */
    if (mpi_info->rank == 0)
        fclose(fileHandle_p);

    /* resolve id's and rearrange elements */
    if (Dudley_noError())
        Dudley_Mesh_resolveNodeIds(mesh_p);
    if (Dudley_noError())
        Dudley_Mesh_prepare(mesh_p, optimize);

    /* that's it */
    if (!Dudley_noError())
    {
        Dudley_Mesh_free(mesh_p);
        mesh_p = NULL;              /* don't hand back a freed pointer */
    }
    /* free up memory */
    Paso_MPIInfo_free(mpi_info);
    return mesh_p;
}
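
/* A minimal usage sketch (hypothetical driver; the file name and the TRUE
   value for the optimize flag are illustrative, error handling elided):

       Dudley_Mesh *mesh = Dudley_Mesh_read("mesh.fly", 1, 1, TRUE);
       if (Dudley_noError() && mesh != NULL)
       {
           ... operate on the mesh ...
           Dudley_Mesh_free(mesh);
       }
*/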
