
Contents of /trunk/dudley/src/Mesh_read.c



Revision 3259
Mon Oct 11 01:48:14 2010 UTC by jfenwick
File MIME type: text/plain
File size: 21852 byte(s)
Merging dudley and scons updates from branches


/*******************************************************
*
* Copyright (c) 2003-2010 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/

/**************************************************************/

/* Dudley: read mesh */

/**************************************************************/

#include <ctype.h>
#include "Mesh.h"

#define FSCANF_CHECK(scan_ret, reason) { if (scan_ret == EOF) { perror(reason); Dudley_setError(IO_ERROR,"scan error while reading dudley file"); return NULL;} }
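/* Note: FSCANF_CHECK only treats an EOF return from fscanf as an error; a return value of 0
   (a matching failure) passes through unchecked, and the early return leaves fileHandle_p open
   and any partially built mesh allocated. */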

Dudley_Mesh *Dudley_Mesh_read(char *fname, index_t order, index_t reduced_order, bool_t optimize)
{
    Esys_MPIInfo *mpi_info = NULL;
    dim_t numNodes, numDim, numEle, i0, i1;
    Dudley_Mesh *mesh_p = NULL;
    char name[LenString_MAX], element_type[LenString_MAX], frm[20];
    char error_msg[LenErrorMsg_MAX];
    FILE *fileHandle_p = NULL;
    Dudley_ElementTypeId typeID = Dudley_NoRef;
    int scan_ret;

    Dudley_resetError();
    mpi_info = Esys_MPIInfo_alloc(MPI_COMM_WORLD);

    if (mpi_info->rank == 0)
    {
        /* get file handle */
        fileHandle_p = fopen(fname, "r");
        if (fileHandle_p == NULL)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Opening file %s for reading failed.", fname);
            Dudley_setError(IO_ERROR, error_msg);
            Esys_MPIInfo_free(mpi_info);
            return NULL;
        }

        /* read header */
        sprintf(frm, "%%%d[^\n]", LenString_MAX - 1);
        scan_ret = fscanf(fileHandle_p, frm, name);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
        /* get the number of nodes */
        scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim, &numNodes);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
    }
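    /* For illustration, the two header lines read above would look something like this
       (hypothetical values; "%1d" picks up the dimension digit and "%*s" skips the rest
       of that token before the node count is read):

           my_mesh
           2D-Nodes 8
     */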
#ifdef ESYS_MPI
    /* MPI Broadcast numDim, numNodes, name if there are multiple MPI procs */
    if (mpi_info->size > 1)
    {
        int temp1[3];
        if (mpi_info->rank == 0)
        {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = strlen(name) + 1;
        }
        else
        {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast(temp1, 3, MPI_INT, 0, mpi_info->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        MPI_Bcast(name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    }
#endif

    /* allocate mesh */
    mesh_p = Dudley_Mesh_alloc(name, numDim, mpi_info);

    if (Dudley_noError())
    {
        /* Each CPU will get at most chunkSize nodes so the message has to be sufficiently large */
        int chunkSize = numNodes / mpi_info->size + 1, totalNodes = 0, chunkNodes = 0, nextCPU = 1;
        int *tempInts = TMPMEMALLOC(chunkSize * 3 + 1, index_t);        /* Stores the integer message data */
        double *tempCoords = TMPMEMALLOC(chunkSize * numDim, double);   /* Stores the double message data */

        /*
           Read chunkSize nodes, send it in a chunk to worker CPU which copies chunk into its local mesh_p
           It doesn't matter that a CPU has the wrong nodes for its elements, this is sorted out later
           First chunk sent to CPU 1, second to CPU 2, ...
           Last chunk stays on CPU 0 (the master)
           The three columns of integers (Id, gDOF, Tag) are gathered into a single array tempInts and sent
           together in a single MPI message
         */
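        /* Layout of the integer message (directly from the indexing below):
           tempInts[0 .. chunkSize-1]               node Ids
           tempInts[chunkSize .. 2*chunkSize-1]     global degrees of freedom
           tempInts[2*chunkSize .. 3*chunkSize-1]   tags
           tempInts[3*chunkSize]                    number of nodes actually in the chunk */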

        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * 3 + 1; i0++)
                    tempInts[i0] = -1;

                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * numDim; i0++)
                    tempCoords[i0] = -1.0;

                chunkNodes = 0;
                for (i1 = 0; i1 < chunkSize; i1++)
                {
                    if (totalNodes >= numNodes)
                        break;          /* End of inner loop */
                    if (1 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (2 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0], &tempCoords[i1 * numDim + 1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (3 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0], &tempCoords[i1 * numDim + 1],
                                          &tempCoords[i1 * numDim + 2]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    totalNodes++;       /* When do we quit the infinite loop? */
                    chunkNodes++;       /* How many nodes do we actually have in this chunk? It may be smaller than chunkSize. */
                }
                if (chunkNodes > chunkSize)
                {
                    Dudley_setError(ESYS_MPI_ERROR,
                                    "Dudley_Mesh_read: error reading chunks of mesh, data too large for message size");
                    return NULL;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send chunkSize nodes to each CPU numbered 1 ... mpi_info->size-1, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * 3] = chunkNodes;       /* The message has one more int to send chunkNodes */
                    MPI_Send(tempInts, chunkSize * 3 + 1, MPI_INT, nextCPU, 81720, mpi_info->comm);
                    MPI_Send(tempCoords, chunkSize * numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;              /* End infinite loop */
            }                           /* Infinite loop */
        }                               /* End master */
        else                            /* Worker */
        {
#ifdef ESYS_MPI
            /* Each worker receives two messages */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * 3 + 1, MPI_INT, 0, 81720, mpi_info->comm, &status);
            MPI_Recv(tempCoords, chunkSize * numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
            chunkNodes = tempInts[chunkSize * 3];       /* How many nodes are in this worker's chunk? */
#endif
        }                               /* Worker */

        /* Copy node data from tempMem to mesh_p */
        Dudley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);
        if (Dudley_noError())
        {
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkNodes; i0++)
            {
                mesh_p->Nodes->Id[i0] = tempInts[0 + i0];
                mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize + i0];
                mesh_p->Nodes->Tag[i0] = tempInts[chunkSize * 2 + i0];
                for (i1 = 0; i1 < numDim; i1++)
                {
                    mesh_p->Nodes->Coordinates[INDEX2(i1, i0, numDim)] = tempCoords[i0 * numDim + i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
        TMPMEMFREE(tempCoords);
    }

    /* *********************************** read elements ***************************************************************** */
    if (Dudley_noError())
    {
        /* Read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef ESYS_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2], mpi_error;
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            mpi_error = MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            if (mpi_error != MPI_SUCCESS)
            {
                Dudley_setError(ESYS_MPI_ERROR, "Dudley_Mesh_read: broadcast of Element typeID failed");
                return NULL;
            }
            typeID = (Dudley_ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == Dudley_NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }

    /* Allocate the ElementFile */
    if (Dudley_noError())
    {
        mesh_p->Elements = Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Elements->numNodes;  /* New meaning for numNodes: num nodes per element */
    }

    /* *************************** Read the element data ****************************************************************** */
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize * (2 + numNodes) + 1, index_t);   /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
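        /* Layout of each element record inside tempInts (directly from the indexing below):
           tempInts[i0*(2+numNodes) + 0]                    element Id
           tempInts[i0*(2+numNodes) + 1]                    element Tag
           tempInts[i0*(2+numNodes) + 2 .. + 1 + numNodes]  the element's node ids
           tempInts[chunkSize*(2+numNodes)]                 number of elements actually in the chunk */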
        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;          /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0],
                                      &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send a chunk of elements to each CPU except 0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81722, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;              /* End infinite loop */
            }                           /* Infinite loop */
        }                               /* End master */
        else
        {                               /* Worker */
#ifdef ESYS_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81722, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                               /* Worker */
        Dudley_ElementFile_allocTable(mesh_p->Elements, chunkEle);

        /* Copy Element data from tempInts to mesh_p */
        if (Dudley_noError())
        {
            mesh_p->Elements->minColor = 0;
            mesh_p->Elements->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->Elements->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->Elements->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->Elements->Owner[i0] = mpi_info->rank;
                mesh_p->Elements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
    }
    /* ******************** end of Read the element data ***************************************************** */

    /* ********************* read face elements ************************************************************** */
    if (Dudley_noError())
    {
        /* Read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef ESYS_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (Dudley_ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == Dudley_NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
        if (Dudley_noError())
        {
            /* Allocate the ElementFile */
            mesh_p->FaceElements = Dudley_ElementFile_alloc(typeID, mpi_info);
            numNodes = mesh_p->FaceElements->numNodes;  /* New meaning for numNodes: num nodes per element */
        }
    }
    /* ********************** Read the face element data ********************************************************* */

    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize * (2 + numNodes) + 1, index_t);   /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;          /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0],
                                      &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send a chunk of elements to each CPU except 0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81723, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;              /* End infinite loop */
            }                           /* Infinite loop */
        }                               /* End master */
        else                            /* Worker */
        {
#ifdef ESYS_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81723, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                               /* Worker */
        Dudley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);
        if (Dudley_noError())
        {
            /* Copy Element data from tempInts to mesh_p */
            mesh_p->FaceElements->minColor = 0;
            mesh_p->FaceElements->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->FaceElements->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->FaceElements->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->FaceElements->Owner[i0] = mpi_info->rank;
                mesh_p->FaceElements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }
    /* ************************************* end of Read the face element data *************************************** */

    /* ********************************* read nodal elements ****************************************************** */
    /* ******************************* Read the element typeID */
    if (Dudley_noError())
    {
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef ESYS_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (Dudley_ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == Dudley_NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }
    if (Dudley_noError())
    {
        /* Allocate the ElementFile */
        mesh_p->Points = Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Points->numNodes;    /* New meaning for numNodes: num nodes per element */
    }
    /********************************** Read the nodal element data **************************************************/
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = TMPMEMALLOC(chunkSize * (2 + numNodes) + 1, index_t);   /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;          /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0],
                                      &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send a chunk of elements to each CPU except 0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81725, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;              /* End infinite loop */
            }                           /* Infinite loop */
        }                               /* End master */
        else                            /* Worker */
        {
#ifdef ESYS_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81725, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                               /* Worker */

        /* Copy Element data from tempInts to mesh_p */
        Dudley_ElementFile_allocTable(mesh_p->Points, chunkEle);
        if (Dudley_noError())
        {
            mesh_p->Points->minColor = 0;
            mesh_p->Points->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->Points->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->Points->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->Points->Owner[i0] = mpi_info->rank;
                mesh_p->Points->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }
    /* ******************************** end of Read the nodal element data ************************************ */
    /****************** get the name tags *****************************************/
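    /* For illustration, the trailing tag section parsed below would look something like this
       (hypothetical names and values): a keyword line followed by "name tag_key" pairs, one per line:

           Tags
           top 1
           bottom 2
     */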
    if (Dudley_noError())
    {
        char *remainder = 0, *ptr;
        size_t len = 0;
#ifdef ESYS_MPI
        int len_i;
#endif
        int tag_key;
        if (mpi_info->rank == 0)        /* Master */
        {
            /* Read the word 'Tag' */
            if (!feof(fileHandle_p))
            {
                scan_ret = fscanf(fileHandle_p, "%s\n", name);
                FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            }
#if defined(_WIN32)             /* windows ftell lies on unix formatted text files */
            {
                /* Read the rest of the file character by character, growing the buffer in fixed-size chunks */
                size_t malloc_chunk = 1024;
                size_t buff_size = 0;
                int ch;
                remainder = NULL;
                len = 0;
                while (1)
                {
                    ch = fgetc(fileHandle_p);
                    if (ch == '\r')
                    {
                        continue;
                    }
                    if (len + 1 > buff_size)
                    {
                        TMPMEMREALLOC(remainder, remainder, buff_size + malloc_chunk, char);
                        buff_size += malloc_chunk;
                    }
                    if (ch == EOF)
                    {
                        /* hit EOF */
                        remainder[len] = (char)0;
                        break;
                    }
                    remainder[len] = (char)ch;
                    len++;
                }
            }
#else
            /* Read rest of file in one chunk, after using seek to find length */
            {
                long cur_pos, end_pos;
                cur_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, 0L, SEEK_END);
                end_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
                remainder = TMPMEMALLOC(end_pos - cur_pos + 1, char);
                if (!feof(fileHandle_p))
                {
                    scan_ret = fread(remainder, (size_t) end_pos - cur_pos, sizeof(char), fileHandle_p);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    remainder[end_pos - cur_pos] = 0;
                }
            }
#endif
            len = strlen(remainder);
            while ((len > 1) && isspace(remainder[--len]))
            {
                remainder[len] = 0;
            }
            len = strlen(remainder);
            TMPMEMREALLOC(remainder, remainder, len + 1, char);
        }                               /* Master */
#ifdef ESYS_MPI

        len_i = (int)len;
        MPI_Bcast(&len_i, 1, MPI_INT, 0, mpi_info->comm);
        len = (size_t) len_i;
        if (mpi_info->rank != 0)
        {
            remainder = TMPMEMALLOC(len + 1, char);
            remainder[0] = 0;
        }
        if (MPI_Bcast(remainder, len + 1, MPI_CHAR, 0, mpi_info->comm) != MPI_SUCCESS)
            Dudley_setError(ESYS_MPI_ERROR, "Dudley_Mesh_read: broadcast of remainder failed");
#endif

        if (remainder[0])
        {
            ptr = remainder;
            do
            {
                sscanf(ptr, "%s %d\n", name, &tag_key);
                if (*name)
                    Dudley_Mesh_addTagMap(mesh_p, name, tag_key);
                ptr++;
            }
            while (NULL != (ptr = strchr(ptr, '\n')) && *ptr);
        }
        if (remainder)
            TMPMEMFREE(remainder);
    }

    /* close file */
    if (mpi_info->rank == 0)
        fclose(fileHandle_p);

    /* resolve id's : */
    /* rearrange elements: */
    if (Dudley_noError())
        Dudley_Mesh_resolveNodeIds(mesh_p);
    if (Dudley_noError())
        Dudley_Mesh_prepare(mesh_p, optimize);

    /* that's it */
    if (!Dudley_noError())
    {
        Dudley_Mesh_free(mesh_p);
    }
    /* free up memory */
    Esys_MPIInfo_free(mpi_info);
    return mesh_p;
}
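For reference, a minimal calling sketch (hypothetical driver code, not part of Mesh_read.c; the file name, the order/reduced_order arguments and the FALSE optimize flag are illustrative only):

    Dudley_Mesh *mesh_p = Dudley_Mesh_read("mesh.fly", 1, 1, FALSE);
    if (Dudley_noError() && mesh_p != NULL)
    {
        /* ... use the mesh ... */
        Dudley_Mesh_free(mesh_p);
    }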

Properties

Name            Value
svn:eol-style   native
svn:keywords    Author Date Id Revision
