/[escript]/branches/trilinos_from_5897/dudley/src/Mesh_read.cpp
Revision 5963 - Mon Feb 22 06:59:27 2016 UTC by caltinay
File size: 22107 byte(s)
Log: sync and fix.


/*****************************************************************************
*
* Copyright (c) 2003-2016 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

/************************************************************************************/

/* Dudley: read mesh */

/************************************************************************************/

#define ESNEEDPYTHON
#include "esysUtils/first.h"

#include <ctype.h>
#include "Mesh.h"

#define FSCANF_CHECK(scan_ret, reason) \
    { \
        if (scan_ret == EOF) \
        { \
            perror(reason); \
            Dudley_setError(IO_ERROR, "scan error while reading dudley file"); \
            return NULL; \
        } \
    }
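
/*
 * FSCANF_CHECK only traps EOF from fscanf(); it reports the failure via
 * perror() and the Dudley error state and then returns NULL from the
 * enclosing function, so it is only usable inside functions that return a
 * pointer (as Dudley_Mesh_read does). Short reads that do not hit EOF are
 * not detected here.
 */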

Dudley_Mesh *Dudley_Mesh_read(char *fname, index_t order, index_t reduced_order, bool optimize)
{
    dim_t numNodes, numDim = 0, numEle, i0, i1;
    Dudley_Mesh *mesh_p = NULL;
    char name[1024], element_type[1024], frm[20];
    char error_msg[1024];
    FILE *fileHandle_p = NULL;
    Dudley_ElementTypeId typeID = Dudley_NoRef;
    int scan_ret;

    Dudley_resetError();
    /* No! Bad! take a parameter for this */
    esysUtils::JMPI mpi_info = esysUtils::makeInfo(MPI_COMM_WORLD);

    if (mpi_info->rank == 0)
    {
        /* get file handle */
        fileHandle_p = fopen(fname, "r");
        if (fileHandle_p == NULL)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Opening file %s for reading failed.", fname);
            Dudley_setError(IO_ERROR, error_msg);
            return NULL;
        }

        /* read header */
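        /*
         * The first header line is read as a free-form mesh name (up to 1023
         * characters). On the next line, "%1d%*s %d" takes the spatial
         * dimension from the first digit, discards the rest of that token
         * (the label text itself is not checked) and then reads the total
         * node count.
         */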
        sprintf(frm, "%%%d[^\n]", 1023);
        scan_ret = fscanf(fileHandle_p, frm, name);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
        /* get the number of nodes */
        scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim, &numNodes);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
    }
#ifdef ESYS_MPI
    /* MPI Broadcast numDim, numNodes, name if there are multiple MPI procs */
    if (mpi_info->size > 1)
    {
        int temp1[3];
        if (mpi_info->rank == 0)
        {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = strlen(name) + 1;
        }
        else
        {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast(temp1, 3, MPI_INT, 0, mpi_info->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        MPI_Bcast(name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    }
#endif

    /* allocate mesh */
    mesh_p = Dudley_Mesh_alloc(name, numDim, mpi_info);

    if (Dudley_noError())
    {
        /* Each CPU will get at most chunkSize nodes so the message has to be sufficiently large */
        int chunkSize = numNodes / mpi_info->size + 1, totalNodes = 0, chunkNodes = 0, nextCPU = 1;
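        /*
         * Worked example of the chunking: with numNodes = 1000 and 4 MPI
         * ranks, chunkSize = 1000/4 + 1 = 251. Ranks 1, 2 and 3 each receive
         * a full chunk of 251 nodes and the master keeps the remaining 247.
         */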
        int *tempInts = new index_t[chunkSize * 3 + 1];      /* Stores the integer message data */
        double *tempCoords = new double[chunkSize * numDim]; /* Stores the double message data */

        /*
           Read chunkSize nodes, send it in a chunk to worker CPU which copies chunk into its local mesh_p
           It doesn't matter that a CPU has the wrong nodes for its elements, this is sorted out later
           First chunk sent to CPU 1, second to CPU 2, ...
           Last chunk stays on CPU 0 (the master)
           The three columns of integers (Id, gDOF, Tag) are gathered into a single array tempInts and sent
           together in a single MPI message
         */
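        /*
         * Layout of the message buffers built below:
         *   tempInts[0 .. chunkSize-1]                : node Id
         *   tempInts[chunkSize .. 2*chunkSize-1]      : global degree of freedom
         *   tempInts[2*chunkSize .. 3*chunkSize-1]    : node Tag
         *   tempInts[3*chunkSize]                     : chunkNodes (actual count in this chunk)
         *   tempCoords[i*numDim .. i*numDim+numDim-1] : coordinates of the i-th node in the chunk
         */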

        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * 3 + 1; i0++)
                    tempInts[i0] = -1;

                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * numDim; i0++)
                    tempCoords[i0] = -1.0;

                chunkNodes = 0;
                for (i1 = 0; i1 < chunkSize; i1++)
                {
                    if (totalNodes >= numNodes)
                        break;      /* End of inner loop */
                    if (1 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (2 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0], &tempCoords[i1 * numDim + 1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (3 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                                          &tempInts[0 + i1], &tempInts[chunkSize + i1], &tempInts[chunkSize * 2 + i1],
                                          &tempCoords[i1 * numDim + 0], &tempCoords[i1 * numDim + 1],
                                          &tempCoords[i1 * numDim + 2]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    totalNodes++;   /* When do we quit the infinite loop? */
                    chunkNodes++;   /* How many nodes do we actually have in this chunk? It may be smaller than chunkSize. */
                }
                if (chunkNodes > chunkSize)
                {
                    Dudley_setError(ESYS_MPI_ERROR,
                                    "Dudley_Mesh_read: error reading chunks of mesh, data too large for message size");
                    return NULL;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send chunkSize nodes to each CPU numbered 1 ... mpi_info->size-1, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * 3] = chunkNodes;   /* The message has one more int to send chunkNodes */
                    MPI_Send(tempInts, chunkSize * 3 + 1, MPI_INT, nextCPU, 81720, mpi_info->comm);
                    MPI_Send(tempCoords, chunkSize * numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* End infinite loop */
            }                       /* Infinite loop */
        }                           /* End master */
        else                        /* Worker */
        {
#ifdef ESYS_MPI
            /* Each worker receives two messages */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * 3 + 1, MPI_INT, 0, 81720, mpi_info->comm, &status);
            MPI_Recv(tempCoords, chunkSize * numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
            chunkNodes = tempInts[chunkSize * 3];   /* How many nodes are in this worker's chunk? */
#endif
        }                           /* Worker */

        /* Copy node data from the temporary arrays into mesh_p */
        Dudley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);
        if (Dudley_noError())
        {
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkNodes; i0++)
            {
                mesh_p->Nodes->Id[i0] = tempInts[0 + i0];
                mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize + i0];
                mesh_p->Nodes->Tag[i0] = tempInts[chunkSize * 2 + i0];
                for (i1 = 0; i1 < numDim; i1++)
                {
                    mesh_p->Nodes->Coordinates[INDEX2(i1, i0, numDim)] = tempCoords[i0 * numDim + i1];
                }
            }
        }
        delete[] tempInts;
        delete[] tempCoords;
    }

    /* *********************************** read elements *************************************************************************************** */
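    /*
     * Each element section starts with a line giving the element type name and
     * the element count. Every following element record is read below as
     * "Id Tag node_0 ... node_{numNodes-1}" on one line, where numNodes is the
     * per-element node count implied by the element type.
     */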
    if (Dudley_noError())
    {
        /* Read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef ESYS_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2], mpi_error;
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            mpi_error = MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            if (mpi_error != MPI_SUCCESS)
            {
                Dudley_setError(ESYS_MPI_ERROR, "Dudley_Mesh_read: broadcast of Element typeID failed");
                return NULL;
            }
            typeID = (Dudley_ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == Dudley_NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }

    /* Allocate the ElementFile */
    if (Dudley_noError())
    {
        mesh_p->Elements = Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Elements->numNodes;   /* New meaning for numNodes: num nodes per element */
    }

    /* *************************** Read the element data **************************************************************************************** */
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = new index_t[chunkSize * (2 + numNodes) + 1];   /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
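        /*
         * Packing of tempInts for one chunk of elements: element i0 occupies
         * the slice [i0*(2+numNodes) .. i0*(2+numNodes)+1+numNodes], holding
         * Id, Tag and the numNodes node indices in that order; the final slot
         * tempInts[chunkSize*(2+numNodes)] carries chunkEle, the number of
         * elements actually present in this chunk.
         */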
        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;      /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0],
                                      &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81722, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* End infinite loop */
            }                       /* Infinite loop */
        }                           /* End master */
        else
        {                           /* Worker */
#ifdef ESYS_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81722, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                           /* Worker */
        Dudley_ElementFile_allocTable(mesh_p->Elements, chunkEle);

        /* Copy Element data from tempInts to mesh_p */
        if (Dudley_noError())
        {
            mesh_p->Elements->minColor = 0;
            mesh_p->Elements->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->Elements->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->Elements->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->Elements->Owner[i0] = mpi_info->rank;
                mesh_p->Elements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }
        delete[] tempInts;
    }
    /* ******************** end of Read the element data ***************************************************** */

    /* ********************* read face elements ************************************************************************************ */
    if (Dudley_noError())
    {
        /* Read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef ESYS_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (Dudley_ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == Dudley_NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
        if (Dudley_noError())
        {
            /* Allocate the ElementFile */
            mesh_p->FaceElements = Dudley_ElementFile_alloc(typeID, mpi_info);
            numNodes = mesh_p->FaceElements->numNodes;   /* New meaning for numNodes: num nodes per element */
        }
    }
    /* ********************** Read the face element data ******************************************************************************* */

    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = new index_t[chunkSize * (2 + numNodes) + 1];   /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;      /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0],
                                      &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81723, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* End infinite loop */
            }                       /* Infinite loop */
        }                           /* End master */
        else                        /* Worker */
        {
#ifdef ESYS_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81723, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                           /* Worker */
        Dudley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);
        if (Dudley_noError())
        {
            /* Copy Element data from tempInts to mesh_p */
            mesh_p->FaceElements->minColor = 0;
            mesh_p->FaceElements->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->FaceElements->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->FaceElements->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->FaceElements->Owner[i0] = mpi_info->rank;
                mesh_p->FaceElements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }

        delete[] tempInts;
    }
    /* ************************************* end of Read the face element data *************************************** */

    /* ********************************* read nodal elements ****************************************************** */
    /* ******************************* Read the element typeID ************************************************** */
    if (Dudley_noError())
    {
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID = eltTypeFromString(element_type);
        }
#ifdef ESYS_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int)typeID;
            temp1[1] = numEle;
            MPI_Bcast(temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (Dudley_ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID == Dudley_NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }
    if (Dudley_noError())
    {
        /* Allocate the ElementFile */
        mesh_p->Points = Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Points->numNodes;   /* New meaning for numNodes: num nodes per element */
    }
    /********************************** Read the nodal element data **************************************************/
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle = 0, nextCPU = 1, chunkEle = 0;
        int *tempInts = new index_t[chunkSize * (2 + numNodes) + 1];   /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
        if (mpi_info->rank == 0)    /* Master */
        {
            for (;;)                /* Infinite loop */
            {
                #pragma omp parallel for private (i0) schedule(static)
                for (i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                    tempInts[i0] = -1;
                chunkEle = 0;
                for (i0 = 0; i0 < chunkSize; i0++)
                {
                    if (totalEle >= numEle)
                        break;      /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0 * (2 + numNodes) + 0],
                                      &tempInts[i0 * (2 + numNodes) + 1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef ESYS_MPI
                /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, nextCPU, 81725, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size)
                    break;          /* End infinite loop */
            }                       /* Infinite loop */
        }                           /* End master */
        else                        /* Worker */
        {
#ifdef ESYS_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_INT, 0, 81725, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
        }                           /* Worker */

        /* Copy Element data from tempInts to mesh_p */
        Dudley_ElementFile_allocTable(mesh_p->Points, chunkEle);
        if (Dudley_noError())
        {
            mesh_p->Points->minColor = 0;
            mesh_p->Points->maxColor = chunkEle - 1;
            #pragma omp parallel for private (i0, i1) schedule(static)
            for (i0 = 0; i0 < chunkEle; i0++)
            {
                mesh_p->Points->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
                mesh_p->Points->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
                mesh_p->Points->Owner[i0] = mpi_info->rank;
                mesh_p->Points->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
            }
        }

        delete[] tempInts;
    }
    /* ******************************** end of Read the nodal element data ************************************ */
    /****************** get the name tags *****************************************/
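    /*
     * The remainder of the file is the tag section: a single keyword line
     * (read into 'name' below) followed by one "tag_name tag_key" pair per
     * line. Rank 0 slurps the rest of the file into 'remainder' and, when MPI
     * is enabled, broadcasts it so every rank builds the same tag map.
     */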
    if (Dudley_noError())
    {
        char *remainder = 0, *ptr;
        size_t len = 0;
#ifdef ESYS_MPI
        int len_i;
#endif
        int tag_key;
        if (mpi_info->rank == 0)    /* Master */
        {
            /* Read the word 'Tag' */
            if (!feof(fileHandle_p))
            {
                scan_ret = fscanf(fileHandle_p, "%s\n", name);
                FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            }
#if defined(_WIN32)     /* windows ftell lies on unix formatted text files */
            remainder = NULL;
            len = 0;
            {
                const size_t malloc_chunk = 1024;
                size_t buff_size = 0;
                int ch;
                while (1)
                {
                    ch = fgetc(fileHandle_p);
                    if (ch == '\r')
                    {
                        continue;
                    }
                    if (len + 1 > buff_size)
                    {
                        /* grow the buffer by one chunk; buff_size is kept outside
                           the loop so the allocation actually grows with len */
                        buff_size += malloc_chunk;
                        TMPMEMREALLOC(remainder, remainder, buff_size, char);
                    }
                    if (ch == EOF)
                    {
                        /* hit EOF */
                        remainder[len] = (char)0;
                        break;
                    }
                    remainder[len] = (char)ch;
                    len++;
                }
            }
#else
            /* Read rest of file in one chunk, after using seek to find length */
            {
                long cur_pos, end_pos;
                cur_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, 0L, SEEK_END);
                end_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
                remainder = new char[end_pos - cur_pos + 1];
                if (!feof(fileHandle_p))
                {
                    scan_ret = fread(remainder, (size_t) end_pos - cur_pos, sizeof(char), fileHandle_p);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    remainder[end_pos - cur_pos] = 0;
                }
            }
#endif
            len = strlen(remainder);
            while ((len > 1) && isspace(remainder[--len]))
            {
                remainder[len] = 0;
            }
            len = strlen(remainder);
            // shrink the allocation unit
            // TMPMEMREALLOC(remainder, remainder, len + 1, char);
        }                           /* Master */
#ifdef ESYS_MPI
        len_i = (int)len;
        MPI_Bcast(&len_i, 1, MPI_INT, 0, mpi_info->comm);
        len = (size_t) len_i;
        if (mpi_info->rank != 0)
        {
            remainder = new char[len + 1];
            remainder[0] = 0;
        }
        if (MPI_Bcast(remainder, len + 1, MPI_CHAR, 0, mpi_info->comm) != MPI_SUCCESS)
            Dudley_setError(ESYS_MPI_ERROR, "Dudley_Mesh_read: broadcast of remainder failed");
#endif

        if (remainder[0])
        {
            ptr = remainder;
            do
            {
                sscanf(ptr, "%s %d\n", name, &tag_key);
                if (*name)
                    Dudley_Mesh_addTagMap(mesh_p, name, tag_key);
                ptr++;
            }
            while (NULL != (ptr = strchr(ptr, '\n')) && *ptr);
        }
        if (remainder)
            delete[] remainder;
    }

    /* close file */
    if (mpi_info->rank == 0)
        fclose(fileHandle_p);

    /* resolve ids: */
    /* rearrange elements: */
    if (Dudley_noError())
        Dudley_Mesh_resolveNodeIds(mesh_p);
    if (Dudley_noError())
        Dudley_Mesh_prepare(mesh_p, optimize);

    /* that's it */
    if (!Dudley_noError())
    {
        Dudley_Mesh_free(mesh_p);
    }
    /* free up memory */
    return mesh_p;
}
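
/*
 * Usage sketch (not part of the original file; the file name and order values
 * below are illustrative only). Because the function performs MPI collectives
 * internally and hard-codes MPI_COMM_WORLD (see the note at the top of the
 * function), it has to be called on every rank:
 *
 *     Dudley_Mesh *mesh = Dudley_Mesh_read("mesh.fly", 1, 1, true);
 *     if (mesh == NULL || !Dudley_noError()) {
 *         // inspect/clear the Dudley error state and abort
 *     }
 *
 * Note that order and reduced_order are accepted but not referenced in the
 * body of this revision; optimize is passed through to Dudley_Mesh_prepare().
 */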
