
Contents of /trunk/finley/src/Mesh_read.c



Revision 2748
Tue Nov 17 07:32:59 2009 UTC by gross
File MIME type: text/plain
File size: 26963 bytes
Macro elements are implemented now. VTK writer for macro elements still needs testing.

/*******************************************************
 *
 * Copyright (c) 2003-2009 by University of Queensland
 * Earth Systems Science Computational Center (ESSCC)
 * http://www.uq.edu.au/esscc
 *
 * Primary Business: Queensland, Australia
 * Licensed under the Open Software License version 3.0
 * http://www.opensource.org/licenses/osl-3.0.php
 *
 *******************************************************/


/**************************************************************/

/* Finley: read mesh */

/**************************************************************/

#include <ctype.h>
#include "Mesh.h"

#define FSCANF_CHECK(scan_ret, reason) { if (scan_ret == EOF) { perror(reason); Finley_setError(IO_ERROR,"scan error while reading finley file"); return NULL;} }
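/* NOTE: FSCANF_CHECK only traps EOF from fscanf; a return value of 0 (no
   conversion performed) is not detected, and the early return does not close
   fileHandle_p or release mpi_info. */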


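/*
   Sketch of the file layout expected by this reader, inferred from the fscanf
   calls below (the keyword following the dimension digit on the second line is
   skipped by the %*s conversion, so its exact spelling is not checked):

       <mesh name>
       <numDim><keyword> <numNodes>
       <Id> <globalDOF> <Tag> <x> [<y> [<z>]]          one line per node
       <element type name> <numEle>
       <Id> <Tag> <node_0> ... <node_{n-1}>            one line per element
       ... face, contact and point element blocks follow in the same form ...
       <tag section keyword>
       <tag name> <tag key>                            one pair per line
*/
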
Finley_Mesh* Finley_Mesh_read(char* fname, index_t order, index_t reduced_order, bool_t optimize)
{
  Paso_MPIInfo *mpi_info = NULL;
  dim_t numNodes, numDim, numEle, i0, i1;
  Finley_Mesh *mesh_p=NULL;
  Finley_ReferenceElementSet *refPoints=NULL, *refContactElements=NULL, *refFaceElements=NULL, *refElements=NULL;
  char name[LenString_MAX], element_type[LenString_MAX], frm[20];
  char error_msg[LenErrorMsg_MAX];
  FILE *fileHandle_p = NULL;
  ElementTypeId typeID=NoRef;
  int scan_ret;

  Finley_resetError();
  mpi_info = Paso_MPIInfo_alloc( MPI_COMM_WORLD );

  if (mpi_info->rank == 0) {
    /* get file handle */
    fileHandle_p = fopen(fname, "r");
    if (fileHandle_p==NULL) {
      sprintf(error_msg, "Finley_Mesh_read: Opening file %s for reading failed.", fname);
      Finley_setError(IO_ERROR, error_msg);
      Paso_MPIInfo_free( mpi_info );
      return NULL;
    }

    /* read header */
    sprintf(frm, "%%%d[^\n]", LenString_MAX-1);
    scan_ret = fscanf(fileHandle_p, frm, name);
    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")

    /* get the number of nodes */
    scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim, &numNodes);
    FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
  }

#ifdef PASO_MPI
  /* broadcast numDim, numNodes and name if there is more than one MPI process */
  if (mpi_info->size > 1) {
    int temp1[3], error_code;
    if (mpi_info->rank == 0) {
      temp1[0] = numDim;
      temp1[1] = numNodes;
      temp1[2] = strlen(name) + 1;
    } else {
      temp1[0] = 0;
      temp1[1] = 0;
      temp1[2] = 1;
    }
    MPI_Bcast (temp1, 3, MPI_INT, 0, mpi_info->comm);
    numDim = temp1[0];
    numNodes = temp1[1];
    error_code = MPI_Bcast (name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    if (error_code != MPI_SUCCESS) {
      Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of name failed");
      return NULL;
    }
  }
#endif

  /* allocate mesh */
  mesh_p = Finley_Mesh_alloc(name, numDim, order, reduced_order, mpi_info);

  if (Finley_noError()) {
    /* Each CPU will get at most chunkSize nodes so the message has to be sufficiently large */
    int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0, nextCPU=1;
    int *tempInts = TMPMEMALLOC(chunkSize*3+1, index_t);        /* Stores the integer message data */
    double *tempCoords = TMPMEMALLOC(chunkSize*numDim, double); /* Stores the double message data */
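    /* Layout of the node message buffers:
         tempInts[0..chunkSize)             node Id
         tempInts[chunkSize..2*chunkSize)   global degree of freedom (gDOF)
         tempInts[2*chunkSize..3*chunkSize) node Tag
         tempInts[3*chunkSize]              number of nodes actually in the chunk (chunkNodes)
         tempCoords[i*numDim..(i+1)*numDim) coordinates of the i-th node of the chunk */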

    /*
       Read chunkSize nodes at a time and send each chunk to a worker CPU, which copies it into its local mesh_p.
       It does not matter that a CPU may receive the wrong nodes for its elements; this is sorted out later.
       The first chunk is sent to CPU 1, the second to CPU 2, and so on; the last chunk stays on CPU 0 (the master).
       The three columns of integers (Id, gDOF, Tag) are gathered into the single array tempInts and sent together in one MPI message.
    */
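    /* Example: with numNodes=10 on 4 MPI ranks, chunkSize = 10/4+1 = 3, so
       ranks 1, 2 and 3 each receive a chunk of 3 nodes and the master keeps
       the final chunk holding the single remaining node. */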

    if (mpi_info->rank == 0) {  /* Master */
      for (;;) {                /* Infinite loop */
        #pragma omp parallel for private (i0) schedule(static)
        for (i0=0; i0<chunkSize*3+1; i0++) tempInts[i0] = -1;

        #pragma omp parallel for private (i0) schedule(static)
        for (i0=0; i0<chunkSize*numDim; i0++) tempCoords[i0] = -1.0;

        chunkNodes = 0;
        for (i1=0; i1<chunkSize; i1++) {
          if (totalNodes >= numNodes) break;  /* End of inner loop */
          if (1 == numDim) {
            scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
                              &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                              &tempCoords[i1*numDim+0]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          if (2 == numDim) {
            scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
                              &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                              &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          if (3 == numDim) {
            scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                              &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                              &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1], &tempCoords[i1*numDim+2]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          totalNodes++;  /* Number of nodes read from the file so far */
          chunkNodes++;  /* Number of nodes actually in this chunk; it may be smaller than chunkSize */
        }
        if (chunkNodes > chunkSize) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: error reading chunks of mesh, data too large for message size");
          return NULL;
        }
#ifdef PASO_MPI
        /* Send chunkSize nodes to each CPU numbered 1 ... mpi_info->size-1; here goes one of those chunks */
        if (nextCPU < mpi_info->size) {
          tempInts[chunkSize*3] = chunkNodes;  /* The message carries one extra int to transmit chunkNodes */
          MPI_Send(tempInts, chunkSize*3+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
          MPI_Send(tempCoords, chunkSize*numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
        }
#endif
        nextCPU++;
        /* The infinite loop ends once a chunk has been read for each worker plus one more chunk for the master */
        if (nextCPU > mpi_info->size) break;  /* End infinite loop */
      } /* Infinite loop */
    } /* End master */
    else {  /* Worker */
#ifdef PASO_MPI
      /* Each worker receives two messages */
      MPI_Status status;
      MPI_Recv(tempInts, chunkSize*3+1, MPI_INT, 0, 81720, mpi_info->comm, &status);
      MPI_Recv(tempCoords, chunkSize*numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
      chunkNodes = tempInts[chunkSize*3];  /* Number of nodes in this worker's chunk */
#endif
    } /* Worker */

    /* Copy node data from the temporary buffers to mesh_p */
    Finley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);

    if (Finley_noError()) {
      #pragma omp parallel for private (i0, i1) schedule(static)
      for (i0=0; i0<chunkNodes; i0++) {
        mesh_p->Nodes->Id[i0] = tempInts[0+i0];
        mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize+i0];
        mesh_p->Nodes->Tag[i0] = tempInts[chunkSize*2+i0];
        for (i1=0; i1<numDim; i1++) {
          mesh_p->Nodes->Coordinates[INDEX2(i1,i0,numDim)] = tempCoords[i0*numDim+i1];
        }
      }
    }
    TMPMEMFREE(tempInts);
    TMPMEMFREE(tempCoords);
  }
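  /* At this point every rank holds one chunk of the node list; matching these nodes
     to the elements read below is deferred to Finley_Mesh_resolveNodeIds() near the
     end of this function. */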

  /* *********************************** read elements ****************************************************************** */
  if (Finley_noError()) {

    /* Read the element typeID */
    if (mpi_info->rank == 0) {
      scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
      FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
      typeID = Finley_ReferenceElement_getTypeId(element_type);
    }
#ifdef PASO_MPI
    if (mpi_info->size > 1) {
      int temp1[2], mpi_error;
      temp1[0] = (int) typeID;
      temp1[1] = numEle;
      mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
      if (mpi_error != MPI_SUCCESS) {
        Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of Element typeID failed");
        return NULL;
      }
      typeID = (ElementTypeId) temp1[0];
      numEle = temp1[1];
    }
#endif
    if (typeID==NoRef) {
      sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
      Finley_setError(VALUE_ERROR, error_msg);
    }
  }

  /* Allocate the ElementFile */
  if (Finley_noError()) {
    refElements = Finley_ReferenceElementSet_alloc(typeID, mesh_p->order, mesh_p->reduced_order);
    mesh_p->Elements = Finley_ElementFile_alloc(refElements, mpi_info);
    numNodes = mesh_p->Elements->referenceElementSet->numNodes;  /* New meaning for numNodes: num nodes per element */
  }

  /* *************************** Read the element data ******************************************************************* */
  if (Finley_noError()) {

    int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
    int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t);  /* Store Id + Tag + node list (+ one int at end for chunkEle) */
    /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
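    /* Layout of tempInts for the i0-th element of the chunk:
         tempInts[i0*(2+numNodes)+0]                  element Id
         tempInts[i0*(2+numNodes)+1]                  element Tag
         tempInts[i0*(2+numNodes)+2 ... +1+numNodes]  the element's node list
         tempInts[chunkSize*(2+numNodes)]             number of elements in the chunk (chunkEle)
       The same layout is reused below for the face, contact and point elements. */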
    if (mpi_info->rank == 0) {  /* Master */
      for (;;) {                /* Infinite loop */
        #pragma omp parallel for private (i0) schedule(static)
        for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

        chunkEle = 0;
        for (i0=0; i0<chunkSize; i0++) {
          if (totalEle >= numEle) break;  /* End inner loop */
          scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          for (i1 = 0; i1 < numNodes; i1++) {
            scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          scan_ret = fscanf(fileHandle_p, "\n");
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          totalEle++;
          chunkEle++;
        }
#ifdef PASO_MPI
        /* Send a chunk of elements to each CPU except 0 itself; here goes one of them */
        if (nextCPU < mpi_info->size) {
          tempInts[chunkSize*(2+numNodes)] = chunkEle;
          MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81722, mpi_info->comm);
        }
#endif
        nextCPU++;
        /* The infinite loop ends once a chunk has been read for each worker plus one more chunk for the master */
        if (nextCPU > mpi_info->size) break;  /* End infinite loop */
      } /* Infinite loop */
    } /* End master */
    else {  /* Worker */
#ifdef PASO_MPI
      /* Each worker receives one message */
      MPI_Status status;
      MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81722, mpi_info->comm, &status);
      chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
    } /* Worker */

    Finley_ElementFile_allocTable(mesh_p->Elements, chunkEle);

    /* Copy element data from tempInts to mesh_p */
    if (Finley_noError()) {
      mesh_p->Elements->minColor=0;
      mesh_p->Elements->maxColor=chunkEle-1;
      #pragma omp parallel for private (i0, i1) schedule(static)
      for (i0=0; i0<chunkEle; i0++) {
        mesh_p->Elements->Id[i0] = tempInts[i0*(2+numNodes)+0];
        mesh_p->Elements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
        mesh_p->Elements->Owner[i0] = mpi_info->rank;
        mesh_p->Elements->Color[i0] = i0;
        for (i1 = 0; i1 < numNodes; i1++) {
          mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
        }
      }
    }

    TMPMEMFREE(tempInts);
  }
  /* ******************** end of read the element data ****************************************************** */

  /* ********************* read face elements *************************************************************** */
  if (Finley_noError()) {
    /* Read the element typeID */

    if (mpi_info->rank == 0) {
      scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
      FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
      typeID = Finley_ReferenceElement_getTypeId(element_type);
    }
#ifdef PASO_MPI
    if (mpi_info->size > 1) {
      int temp1[2];
      temp1[0] = (int) typeID;
      temp1[1] = numEle;
      MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
      typeID = (ElementTypeId) temp1[0];
      numEle = temp1[1];
    }
#endif
    if (typeID==NoRef) {
      sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
      Finley_setError(VALUE_ERROR, error_msg);
    }
    if (Finley_noError()) {
      /* Allocate the ElementFile */
      refFaceElements = Finley_ReferenceElementSet_alloc(typeID, mesh_p->order, mesh_p->reduced_order);
      mesh_p->FaceElements = Finley_ElementFile_alloc(refFaceElements, mpi_info);
      numNodes = mesh_p->FaceElements->referenceElementSet->numNodes;  /* New meaning for numNodes: num nodes per element */
    }

  }

  /* ********************** Read the face element data ********************************************************* */

  if (Finley_noError()) {
    int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
    int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t);  /* Store Id + Tag + node list (+ one int at end for chunkEle) */
    /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
    if (mpi_info->rank == 0) {  /* Master */
      for (;;) {                /* Infinite loop */
        #pragma omp parallel for private (i0) schedule(static)
        for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

        chunkEle = 0;
        for (i0=0; i0<chunkSize; i0++) {
          if (totalEle >= numEle) break;  /* End inner loop */
          scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          for (i1 = 0; i1 < numNodes; i1++) {
            scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          scan_ret = fscanf(fileHandle_p, "\n");
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          totalEle++;
          chunkEle++;
        }
#ifdef PASO_MPI
        /* Send a chunk of face elements to each CPU except 0 itself; here goes one of them */
        if (nextCPU < mpi_info->size) {
          tempInts[chunkSize*(2+numNodes)] = chunkEle;
          MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81723, mpi_info->comm);
        }
#endif
        nextCPU++;
        /* The infinite loop ends once a chunk has been read for each worker plus one more chunk for the master */
        if (nextCPU > mpi_info->size) break;  /* End infinite loop */
      } /* Infinite loop */
    } /* End master */
    else {  /* Worker */
#ifdef PASO_MPI
      /* Each worker receives one message */
      MPI_Status status;
      MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81723, mpi_info->comm, &status);
      chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
    } /* Worker */

    Finley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);

    if (Finley_noError()) {
      /* Copy face element data from tempInts to mesh_p */
      mesh_p->FaceElements->minColor=0;
      mesh_p->FaceElements->maxColor=chunkEle-1;
      #pragma omp parallel for private (i0, i1)
      for (i0=0; i0<chunkEle; i0++) {
        mesh_p->FaceElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
        mesh_p->FaceElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
        mesh_p->FaceElements->Owner[i0] = mpi_info->rank;
        mesh_p->FaceElements->Color[i0] = i0;
        for (i1 = 0; i1 < numNodes; i1++) {
          mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
        }
      }
    }

    TMPMEMFREE(tempInts);
  }
  /* ************************************* end of read the face element data *************************************** */


  /* ************************************* read contact elements ************************************************** */

  /* Read the element typeID */
  if (Finley_noError()) {
    if (mpi_info->rank == 0) {
      scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
      FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
      typeID = Finley_ReferenceElement_getTypeId(element_type);
    }
#ifdef PASO_MPI
    if (mpi_info->size > 1) {
      int temp1[2];
      temp1[0] = (int) typeID;
      temp1[1] = numEle;
      MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
      typeID = (ElementTypeId) temp1[0];
      numEle = temp1[1];
    }
#endif
    if (typeID==NoRef) {
      sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
      Finley_setError(VALUE_ERROR, error_msg);
    }
  }

  if (Finley_noError()) {
    /* Allocate the ElementFile */
    refContactElements = Finley_ReferenceElementSet_alloc(typeID, mesh_p->order, mesh_p->reduced_order);
    mesh_p->ContactElements = Finley_ElementFile_alloc(refContactElements, mpi_info);
    numNodes = mesh_p->ContactElements->referenceElementSet->numNodes;  /* New meaning for numNodes: num nodes per element */
  }
  /* *************************** Read the contact element data ************************************************* */
  if (Finley_noError()) {
    int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
    int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t);  /* Store Id + Tag + node list (+ one int at end for chunkEle) */
    /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
    if (mpi_info->rank == 0) {  /* Master */
      for (;;) {                /* Infinite loop */
        #pragma omp parallel for private (i0) schedule(static)
        for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

        chunkEle = 0;
        for (i0=0; i0<chunkSize; i0++) {
          if (totalEle >= numEle) break;  /* End inner loop */
          scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          for (i1 = 0; i1 < numNodes; i1++) {
            scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          scan_ret = fscanf(fileHandle_p, "\n");
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          totalEle++;
          chunkEle++;
        }
#ifdef PASO_MPI
        /* Send a chunk of contact elements to each CPU except 0 itself; here goes one of them */
        if (nextCPU < mpi_info->size) {
          tempInts[chunkSize*(2+numNodes)] = chunkEle;
          MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81724, mpi_info->comm);
        }
#endif
        nextCPU++;
        /* The infinite loop ends once a chunk has been read for each worker plus one more chunk for the master */
        if (nextCPU > mpi_info->size) break;  /* End infinite loop */
      } /* Infinite loop */
    } /* End master */
    else {  /* Worker */
#ifdef PASO_MPI
      /* Each worker receives one message */
      MPI_Status status;
      MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81724, mpi_info->comm, &status);
      chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
    } /* Worker */

    /* Copy contact element data from tempInts to mesh_p */
    Finley_ElementFile_allocTable(mesh_p->ContactElements, chunkEle);

    if (Finley_noError()) {
      mesh_p->ContactElements->minColor=0;
      mesh_p->ContactElements->maxColor=chunkEle-1;
      #pragma omp parallel for private (i0, i1)
      for (i0=0; i0<chunkEle; i0++) {
        mesh_p->ContactElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
        mesh_p->ContactElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
        mesh_p->ContactElements->Owner[i0] = mpi_info->rank;
        mesh_p->ContactElements->Color[i0] = i0;
        for (i1 = 0; i1 < numNodes; i1++) {
          mesh_p->ContactElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
        }
      }
    }
    TMPMEMFREE(tempInts);
  } /* end of read the contact element data */

  /* ********************************* read nodal elements ****************************************************** */

  /* Read the element typeID */

  if (Finley_noError()) {
    if (mpi_info->rank == 0) {
      scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
      FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
      typeID = Finley_ReferenceElement_getTypeId(element_type);
    }
#ifdef PASO_MPI
    if (mpi_info->size > 1) {
      int temp1[2];
      temp1[0] = (int) typeID;
      temp1[1] = numEle;
      MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
      typeID = (ElementTypeId) temp1[0];
      numEle = temp1[1];
    }
#endif
    if (typeID==NoRef) {
      sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
      Finley_setError(VALUE_ERROR, error_msg);
    }
  }

  if (Finley_noError()) {
    /* Allocate the ElementFile */
    refPoints = Finley_ReferenceElementSet_alloc(typeID, mesh_p->order, mesh_p->reduced_order);
    mesh_p->Points = Finley_ElementFile_alloc(refPoints, mpi_info);
    numNodes = mesh_p->Points->referenceElementSet->numNodes;  /* New meaning for numNodes: num nodes per element */
  }

  /* ********************************** Read the nodal element data ************************************************** */
  if (Finley_noError()) {
    int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
    int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t);  /* Store Id + Tag + node list (+ one int at end for chunkEle) */
    /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
    if (mpi_info->rank == 0) {  /* Master */
      for (;;) {                /* Infinite loop */
        #pragma omp parallel for private (i0) schedule(static)
        for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;

        chunkEle = 0;
        for (i0=0; i0<chunkSize; i0++) {
          if (totalEle >= numEle) break;  /* End inner loop */
          scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          for (i1 = 0; i1 < numNodes; i1++) {
            scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          }
          scan_ret = fscanf(fileHandle_p, "\n");
          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          totalEle++;
          chunkEle++;
        }
#ifdef PASO_MPI
        /* Send a chunk of nodal elements to each CPU except 0 itself; here goes one of them */
        if (nextCPU < mpi_info->size) {
          tempInts[chunkSize*(2+numNodes)] = chunkEle;
          MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81725, mpi_info->comm);
        }
#endif
        nextCPU++;
        /* The infinite loop ends once a chunk has been read for each worker plus one more chunk for the master */
        if (nextCPU > mpi_info->size) break;  /* End infinite loop */
      } /* Infinite loop */
    } /* End master */
    else {  /* Worker */
#ifdef PASO_MPI
      /* Each worker receives one message */
      MPI_Status status;
      MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81725, mpi_info->comm, &status);
      chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
    } /* Worker */

    /* Copy nodal element data from tempInts to mesh_p */
    Finley_ElementFile_allocTable(mesh_p->Points, chunkEle);

    if (Finley_noError()) {
      mesh_p->Points->minColor=0;
      mesh_p->Points->maxColor=chunkEle-1;
      #pragma omp parallel for private (i0, i1) schedule(static)
      for (i0=0; i0<chunkEle; i0++) {
        mesh_p->Points->Id[i0] = tempInts[i0*(2+numNodes)+0];
        mesh_p->Points->Tag[i0] = tempInts[i0*(2+numNodes)+1];
        mesh_p->Points->Owner[i0] = mpi_info->rank;
        mesh_p->Points->Color[i0] = i0;
        for (i1 = 0; i1 < numNodes; i1++) {
          mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
        }
      }
    }

    TMPMEMFREE(tempInts);
  } /* end of read the nodal element data */


  /* ****************** get the name tags ***************************************** */
  if (Finley_noError()) {
    char *remainder=0, *ptr;
    size_t len=0;
#ifdef PASO_MPI
    int len_i;
#endif
    int tag_key;
    if (mpi_info->rank == 0) {  /* Master */
      /* Read the keyword introducing the tag section */
      if (! feof(fileHandle_p)) {
        scan_ret = fscanf(fileHandle_p, "%s\n", name);
        FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
      }

#if defined(_WIN32)   /* windows ftell lies on unix formatted text files */
      {
        size_t malloc_chunk = 1024;
        size_t buff_size = 0;
        int ch;

        remainder = NULL;
        len = 0;
        while (1)
        {
          ch = fgetc(fileHandle_p);
          if (ch == '\r')
          {
            continue;
          }
          if (len+1 > buff_size)
          {
            /* grow the buffer by malloc_chunk bytes so that len+1 characters plus the terminator fit */
            TMPMEMREALLOC(remainder, remainder, buff_size+malloc_chunk, char);
            buff_size += malloc_chunk;
          }
          if (ch == EOF)
          {
            /* hit EOF */
            remainder[len] = (char)0;
            break;
          }
          remainder[len] = (char)ch;
          len++;
        }
      }
#else
      /* Read the rest of the file in one chunk, after using seek to find its length */
      {
        long cur_pos, end_pos;

        cur_pos = ftell(fileHandle_p);
        fseek(fileHandle_p, 0L, SEEK_END);
        end_pos = ftell(fileHandle_p);
        fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
        remainder = TMPMEMALLOC(end_pos-cur_pos+1, char);
        if (! feof(fileHandle_p))
        {
          scan_ret = fread(remainder, (size_t) end_pos-cur_pos,
                           sizeof(char), fileHandle_p);

          FSCANF_CHECK(scan_ret, "Finley_Mesh_read")
          remainder[end_pos-cur_pos] = 0;
        }
      }
#endif
      len = strlen(remainder);
      while ((len>1) && isspace(remainder[--len])) { remainder[len]=0; }
      len = strlen(remainder);
      TMPMEMREALLOC(remainder, remainder, len+1, char);
    }
#ifdef PASO_MPI

    len_i = (int) len;
    MPI_Bcast (&len_i, 1, MPI_INT, 0, mpi_info->comm);
    len = (size_t) len_i;
    if (mpi_info->rank != 0) {
      remainder = TMPMEMALLOC(len+1, char);
      remainder[0] = 0;
    }
    if (MPI_Bcast (remainder, len+1, MPI_CHAR, 0, mpi_info->comm) != MPI_SUCCESS) {
      Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of the tag names failed");
      return NULL;
    }
#endif

    if (remainder[0]) {
      ptr = remainder;
      do {
        sscanf(ptr, "%s %d\n", name, &tag_key);
        if (*name) Finley_Mesh_addTagMap(mesh_p, name, tag_key);
        ptr++;
      } while (NULL != (ptr = strchr(ptr, '\n')) && *ptr);
      TMPMEMFREE(remainder);
    }
  }

  /* close file */
  if (mpi_info->rank == 0) fclose(fileHandle_p);

  /* resolve id's and rearrange elements */
  if (Finley_noError()) Finley_Mesh_resolveNodeIds(mesh_p);
  if (Finley_noError()) Finley_Mesh_prepare(mesh_p, optimize);

  /* that's it */
  if (! Finley_noError()) {
    Finley_Mesh_free(mesh_p);
  }
  /* free up memory */
  Finley_ReferenceElementSet_dealloc(refPoints);
  Finley_ReferenceElementSet_dealloc(refContactElements);
  Finley_ReferenceElementSet_dealloc(refFaceElements);
  Finley_ReferenceElementSet_dealloc(refElements);
  Paso_MPIInfo_free( mpi_info );
  return mesh_p;
}
