/[escript]/branches/domexper/dudley/src/Mesh_read.c
ViewVC logotype

Annotation of /branches/domexper/dudley/src/Mesh_read.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3221 - (hide annotations)
Wed Sep 29 01:00:21 2010 UTC (8 years, 7 months ago) by jfenwick
File MIME type: text/plain
File size: 21196 byte(s)
Comment stripping

1 jgs 150
2 ksteube 1312 /*******************************************************
3 ksteube 1811 *
4 jfenwick 2881 * Copyright (c) 2003-2010 by University of Queensland
5 ksteube 1811 * Earth Systems Science Computational Center (ESSCC)
6     * http://www.uq.edu.au/esscc
7     *
8     * Primary Business: Queensland, Australia
9     * Licensed under the Open Software License version 3.0
10     * http://www.opensource.org/licenses/osl-3.0.php
11     *
12     *******************************************************/
13 jgs 82
14 ksteube 1811
15 jgs 82 /**************************************************************/
16    
17 jfenwick 3086 /* Dudley: read mesh */
18 jgs 82
19     /**************************************************************/
20    
#include <ctype.h>
#include "Mesh.h"

/* Abort the current read on an fscanf failure: report the failing context via
 * perror(), record an IO_ERROR through the Dudley error mechanism, and bail
 * out of the *calling* function with NULL.
 *
 * NOTE(review): only scan_ret == EOF is detected. A matching failure where
 * fscanf returns fewer conversions than requested (but not EOF) slips through
 * silently -- confirm whether callers rely on that.
 * NOTE(review): the early `return NULL` leaks whatever the caller holds open
 * at that point (fileHandle_p, mpi_info, temp buffers); there is no cleanup
 * path. Intentionally kept brace-wrapped (not do/while(0)) because every call
 * site in this file omits the trailing semicolon. */
#define FSCANF_CHECK(scan_ret, reason) { if (scan_ret == EOF) { perror(reason); Dudley_setError(IO_ERROR,"scan error while reading dudley file"); return NULL;} }
26 jgs 82
/**************************************************************************
 * Dudley_Mesh_read:
 *   Read a Dudley mesh from the text file `fname` and return a newly
 *   allocated Dudley_Mesh, or NULL on a scan error (via FSCANF_CHECK).
 *
 *   Rank 0 does all file I/O. Under MPI (PASO_MPI) the node/element data is
 *   read in chunks of roughly numX/size+1 records; each chunk is sent to one
 *   worker rank (tags 81720..81725) and the last chunk stays on rank 0.
 *   The file layout parsed here is: header line, node table, element table,
 *   face-element table, point (nodal) element table, then name/tag pairs.
 *
 *   fname         - path of the mesh file to read (opened read-only on rank 0)
 *   order         - NOTE(review): not referenced anywhere in this body --
 *                   presumably consumed by Dudley_Mesh_prepare in other
 *                   versions; confirm before removing.
 *   reduced_order - NOTE(review): likewise unused in this body.
 *   optimize      - forwarded to Dudley_Mesh_prepare().
 *
 *   On any error recorded via Dudley_setError, the partially built mesh is
 *   freed and mesh_p is still returned.
 *   NOTE(review): after Dudley_Mesh_free(mesh_p) the function returns mesh_p
 *   unchanged -- if free actually releases the object this returns a dangling
 *   pointer rather than NULL; verify Dudley_Mesh_free semantics.
 *   NOTE(review): the FSCANF_CHECK early-return paths leak fileHandle_p,
 *   mpi_info, tempInts and tempCoords.
 **************************************************************************/
Dudley_Mesh* Dudley_Mesh_read(char* fname,index_t order, index_t reduced_order, bool_t optimize)
{
    Paso_MPIInfo *mpi_info = NULL;
    dim_t numNodes, numDim, numEle, i0, i1;
    Dudley_Mesh *mesh_p=NULL;
    /* name doubles as the mesh title and later as a scratch buffer for tag names.
       frm holds a dynamically built fscanf format ("%<LenString_MAX-1>[^\n]"). */
    char name[LenString_MAX],element_type[LenString_MAX],frm[20];
    char error_msg[LenErrorMsg_MAX];
    FILE *fileHandle_p = NULL;
    ElementTypeId typeID=NoRef;
    int scan_ret;

    Dudley_resetError();
    mpi_info = Paso_MPIInfo_alloc( MPI_COMM_WORLD );

    if (mpi_info->rank == 0) {
        /* get file handle */
        fileHandle_p = fopen(fname, "r");
        if (fileHandle_p==NULL) {
            sprintf(error_msg,"Dudley_Mesh_read: Opening file %s for reading failed.",fname);
            Dudley_setError(IO_ERROR,error_msg);
            Paso_MPIInfo_free( mpi_info );
            return NULL;
        }

        /* read header: first line of the file is the mesh name/title.
           The format string is built at runtime so the buffer bound tracks
           LenString_MAX. */
        sprintf(frm,"%%%d[^\n]",LenString_MAX-1);
        scan_ret = fscanf(fileHandle_p, frm, name);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")

        /* get the number of nodes: "<numDim>D <numNodes>" -- %1d reads the
           single dimension digit, %*s skips the trailing "D"/unit token.
           NOTE(review): assumes dim_t is int-compatible with %d -- confirm. */
        scan_ret = fscanf(fileHandle_p, "%1d%*s %d\n", &numDim,&numNodes);
        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
    }

#ifdef PASO_MPI
    /* MPI Broadcast numDim, numNodes, name if there are multiple MPI procs */
    if (mpi_info->size > 1)
    {
        int temp1[3];
        if (mpi_info->rank == 0)
        {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = strlen(name) + 1; /* include the terminating NUL */
        } else {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast (temp1, 3, MPI_INT, 0, mpi_info->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        MPI_Bcast (name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    }
#endif

    /* allocate mesh (all ranks, using the broadcast name/numDim) */
    mesh_p = Dudley_Mesh_alloc(name,numDim,mpi_info);

    if (Dudley_noError())
    {
        /* Each CPU will get at most chunkSize nodes so the message has to be
           sufficiently large */
        int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0, nextCPU=1;
        /* Integer message: 3 columns (Id, gDOF, Tag) stored column-wise, plus
           one trailing int carrying the actual chunkNodes count. */
        int *tempInts = TMPMEMALLOC(chunkSize*3+1, index_t);
        /* Double message: coordinates, stored row-wise (node-major). */
        double *tempCoords = TMPMEMALLOC(chunkSize*numDim, double);

        /*
          Read chunkSize nodes, send it in a chunk to worker CPU which copies
          chunk into its local mesh_p.
          It doesn't matter that a CPU has the wrong nodes for its elements,
          this is sorted out later.
          First chunk sent to CPU 1, second to CPU 2, ...
          Last chunk stays on CPU 0 (the master).
          The three columns of integers (Id, gDOF, Tag) are gathered into a
          single array tempInts and sent together in a single MPI message.
        */

        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* one iteration per destination rank */
            {
                /* -1 sentinels mark unused slots in a short final chunk. */
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*3+1; i0++) tempInts[i0] = -1;

#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*numDim; i0++) tempCoords[i0] = -1.0;

                chunkNodes = 0;
                for (i1=0; i1<chunkSize; i1++)
                {
                    if (totalNodes >= numNodes) break; /* End of inner loop */
                    /* One record per node: Id gDOF Tag x [y [z]] */
                    if (1 == numDim) {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le\n",
                            &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                            &tempCoords[i1*numDim+0]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (2 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le\n",
                            &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                            &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    if (3 == numDim)
                    {
                        scan_ret = fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                            &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                            &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1], &tempCoords[i1*numDim+2]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    totalNodes++; /* When do we quit the infinite loop? */
                    chunkNodes++; /* How many nodes do we actually have in this
                                     chunk? It may be smaller than chunkSize. */
                }
                /* NOTE(review): chunkNodes is bounded by the loop above
                   (i1 < chunkSize), so this overflow check can never fire. */
                if (chunkNodes > chunkSize)
                {
                    Dudley_setError(PASO_MPI_ERROR, "Dudley_Mesh_read: error reading chunks of mesh, data too large for message size");
                    return NULL;
                }
#ifdef PASO_MPI
                /* Eventually we'll send chunkSize nodes to each CPU numbered
                   1 ... mpi_info->size-1, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize*3] = chunkNodes; /* extra int carries chunkNodes */
                    MPI_Send(tempInts, chunkSize*3+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
                    MPI_Send(tempCoords, chunkSize*numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Loop ends when I've read a chunk for each of the worker
                   nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break;
            } /* loop over destination ranks */
        } /* End master */
        else /* Worker */
        {
#ifdef PASO_MPI
            /* Each worker receives two messages */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*3+1, MPI_INT, 0, 81720, mpi_info->comm, &status);
            MPI_Recv(tempCoords, chunkSize*numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
            chunkNodes = tempInts[chunkSize*3]; /* How many nodes are in this worker's chunk? */
#endif
        } /* Worker */

        /* Copy node data from the temp buffers into mesh_p->Nodes */
        Dudley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);
        if (Dudley_noError())
        {
#pragma omp parallel for private (i0, i1) schedule(static)
            for (i0=0; i0<chunkNodes; i0++)
            {
                mesh_p->Nodes->Id[i0] = tempInts[0+i0];
                mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize+i0];
                mesh_p->Nodes->Tag[i0] = tempInts[chunkSize*2+i0];
                for (i1=0; i1<numDim; i1++)
                {
                    mesh_p->Nodes->Coordinates[INDEX2(i1,i0,numDim)] = tempCoords[i0*numDim+i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
        TMPMEMFREE(tempCoords);
    }

    /* *********************************** read elements *********************************** */
    if (Dudley_noError())
    {
        /* Read the element typeID ("<typename> <numEle>") on the master */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID=eltTypeFromString(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2], mpi_error;
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
            if (mpi_error != MPI_SUCCESS)
            {
                Dudley_setError(PASO_MPI_ERROR, "Dudley_Mesh_read: broadcast of Element typeID failed");
                return NULL;
            }
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID==NoRef) {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }

    /* Allocate the ElementFile */
    if (Dudley_noError()) {
        mesh_p->Elements=Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Elements->numNodes; /* New meaning for numNodes: num nodes per element */
    }

    /* *************************** Read the element data *************************** */
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
        /* Store Id + Tag + node list per element (+ one int at end for chunkEle) */
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t);
        /* Elements are specified as a list of integers...only need one message
           instead of two as with the nodes */
        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* one iteration per destination rank */
            {
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++)
                {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    /* consume the record terminator; a format with no
                       conversions returns 0, so FSCANF_CHECK (EOF-only)
                       still passes here */
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually we'll send chunk of elements to each CPU except
                   0 itself, here goes one of them */
                if (nextCPU < mpi_info->size) {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81722, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Loop ends when I've read a chunk for each of the worker
                   nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break;
            } /* loop over destination ranks */
        } /* End master */
        else
        { /* Worker */
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81722, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */
        Dudley_ElementFile_allocTable(mesh_p->Elements, chunkEle);

        /* Copy element data from tempInts into mesh_p->Elements */
        if (Dudley_noError())
        {
            mesh_p->Elements->minColor=0;
            mesh_p->Elements->maxColor=chunkEle-1;
#pragma omp parallel for private (i0, i1) schedule(static)
            for (i0=0; i0<chunkEle; i0++)
            {
                mesh_p->Elements->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->Elements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->Elements->Owner[i0] =mpi_info->rank;
                mesh_p->Elements->Color[i0] = i0; /* each element its own color initially */
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }
        TMPMEMFREE(tempInts);
    }
    /* ******************** end of Read the element data ******************** */

    /* ********************* read face elements ********************* */
    if (Dudley_noError())
    {
        /* Read the element typeID */
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID=eltTypeFromString(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1)
        {
            /* NOTE(review): unlike the interior-element broadcast above, this
               MPI_Bcast result is not checked -- confirm intent. */
            int temp1[2];
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID==NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
        if (Dudley_noError())
        {
            /* Allocate the ElementFile */
            mesh_p->FaceElements=Dudley_ElementFile_alloc(typeID, mpi_info);
            numNodes = mesh_p->FaceElements->numNodes; /* New meaning for numNodes: num nodes per element */
        }
    }
    /* ********************** Read the face element data ********************** */

    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message
           instead of two as with the nodes */
        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* one iteration per destination rank */
            {
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++)
                {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually we'll send chunk of elements to each CPU except
                   0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81723, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Loop ends when I've read a chunk for each of the worker
                   nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break;
            } /* loop over destination ranks */
        } /* End master */
        else /* Worker */
        {
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81723, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */
        Dudley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);
        if (Dudley_noError())
        {
            /* Copy face-element data from tempInts into mesh_p->FaceElements */
            mesh_p->FaceElements->minColor=0;
            mesh_p->FaceElements->maxColor=chunkEle-1;
            /* NOTE(review): this pragma lacks the schedule(static) used by
               its siblings -- harmless, but inconsistent. */
#pragma omp parallel for private (i0, i1)
            for (i0=0; i0<chunkEle; i0++)
            {
                mesh_p->FaceElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->FaceElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->FaceElements->Owner[i0] =mpi_info->rank;
                mesh_p->FaceElements->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    }
    /* ************** end of Read the face element data ************** */

    /* ********************* read nodal elements ********************* */
    /* Read the element typeID */
    if (Dudley_noError())
    {
        if (mpi_info->rank == 0)
        {
            scan_ret = fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
            FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            typeID=eltTypeFromString(element_type);
        }
#ifdef PASO_MPI
        if (mpi_info->size > 1)
        {
            int temp1[2];
            temp1[0] = (int) typeID;
            temp1[1] = numEle;
            MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
            typeID = (ElementTypeId) temp1[0];
            numEle = temp1[1];
        }
#endif
        if (typeID==NoRef)
        {
            sprintf(error_msg, "Dudley_Mesh_read: Unidentified element type %s", element_type);
            Dudley_setError(VALUE_ERROR, error_msg);
        }
    }
    if (Dudley_noError())
    {
        /* Allocate the ElementFile */
        mesh_p->Points=Dudley_ElementFile_alloc(typeID, mpi_info);
        numNodes = mesh_p->Points->numNodes; /* New meaning for numNodes: num nodes per element */
    }
    /********************** Read the nodal element data **********************/
    if (Dudley_noError())
    {
        int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1, chunkEle=0;
        int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
        /* Elements are specified as a list of integers...only need one message
           instead of two as with the nodes */
        if (mpi_info->rank == 0)        /* Master */
        {
            for (;;)                    /* one iteration per destination rank */
            {
#pragma omp parallel for private (i0) schedule(static)
                for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
                chunkEle = 0;
                for (i0=0; i0<chunkSize; i0++)
                {
                    if (totalEle >= numEle) break; /* End inner loop */
                    scan_ret = fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    for (i1 = 0; i1 < numNodes; i1++)
                    {
                        scan_ret = fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
                        FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    }
                    scan_ret = fscanf(fileHandle_p, "\n");
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    totalEle++;
                    chunkEle++;
                }
#ifdef PASO_MPI
                /* Eventually we'll send chunk of elements to each CPU except
                   0 itself, here goes one of them */
                if (nextCPU < mpi_info->size)
                {
                    tempInts[chunkSize*(2+numNodes)] = chunkEle;
                    MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81725, mpi_info->comm);
                }
#endif
                nextCPU++;
                /* Loop ends when I've read a chunk for each of the worker
                   nodes plus one more chunk for the master */
                if (nextCPU > mpi_info->size) break;
            } /* loop over destination ranks */
        } /* End master */
        else /* Worker */
        {
#ifdef PASO_MPI
            /* Each worker receives one message */
            MPI_Status status;
            MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81725, mpi_info->comm, &status);
            chunkEle = tempInts[chunkSize*(2+numNodes)];
#endif
        } /* Worker */

        /* Copy point-element data from tempInts into mesh_p->Points */
        Dudley_ElementFile_allocTable(mesh_p->Points, chunkEle);
        if (Dudley_noError())
        {
            mesh_p->Points->minColor=0;
            mesh_p->Points->maxColor=chunkEle-1;
#pragma omp parallel for private (i0, i1) schedule(static)
            for (i0=0; i0<chunkEle; i0++)
            {
                mesh_p->Points->Id[i0] = tempInts[i0*(2+numNodes)+0];
                mesh_p->Points->Tag[i0] = tempInts[i0*(2+numNodes)+1];
                mesh_p->Points->Owner[i0] =mpi_info->rank;
                mesh_p->Points->Color[i0] = i0;
                for (i1 = 0; i1 < numNodes; i1++)
                {
                    mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
                }
            }
        }

        TMPMEMFREE(tempInts);
    } /* ************** end of Read the nodal element data ************** */

    /****************** get the name tags *****************************/
    /* The rest of the file is "<tagname> <tag_key>" pairs; rank 0 slurps the
       remainder into a heap buffer, broadcasts it, and every rank parses it. */
    if (Dudley_noError())
    {
        char *remainder=0, *ptr;
        size_t len=0;
#ifdef PASO_MPI
        int len_i;
#endif
        int tag_key;
        if (mpi_info->rank == 0)        /* Master */
        {
            /* Read the word 'Tag' */
            if (! feof(fileHandle_p))
            {
                scan_ret = fscanf(fileHandle_p, "%s\n", name);
                FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
            }
#if defined(_WIN32)             /* windows ftell lies on unix formatted text files */
            /* Read char-by-char, dropping '\r'.
               NOTE(review): malloc_chunk and buff_size are (re)declared inside
               the loop, so buff_size is 0 every iteration and the realloc is
               always to 1024 bytes: the buffer never grows and len can run
               past it for remainders > 1023 chars -- looks like a genuine
               buffer overflow; confirm and hoist these out of the loop. */
            remainder = NULL;
            len=0;
            while (1)
            {
                size_t malloc_chunk = 1024;
                size_t buff_size = 0;
                int ch;
                ch = fgetc(fileHandle_p);
                if( ch == '\r' )
                {
                    continue;
                }
                if( len+1 > buff_size )
                {
                    TMPMEMREALLOC(remainder,remainder,buff_size+malloc_chunk,char);
                }
                if( ch == EOF )
                {
                    /* hit EOF */
                    remainder[len] = (char)0;
                    break;
                }
                remainder[len] = (char)ch;
                len++;
            }
#else
            /* Read rest of file in one chunk, after using seek to find length */
            {
                long cur_pos, end_pos;
                cur_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, 0L, SEEK_END);
                end_pos = ftell(fileHandle_p);
                fseek(fileHandle_p, (long)cur_pos, SEEK_SET);
                remainder = TMPMEMALLOC(end_pos-cur_pos+1, char);
                if (! feof(fileHandle_p))
                {
                    /* NOTE(review): fread's size and nmemb arguments are
                       swapped (size=end_pos-cur_pos, nmemb=sizeof(char));
                       the bytes still land correctly, but the return value
                       is an item count, never EOF, so FSCANF_CHECK below can
                       never detect a short read -- verify. */
                    scan_ret = fread(remainder, (size_t) end_pos-cur_pos,
                        sizeof(char), fileHandle_p);
                    FSCANF_CHECK(scan_ret, "Dudley_Mesh_read")
                    remainder[end_pos-cur_pos] = 0;
                }
            }
#endif
            /* strip trailing whitespace, then shrink the buffer to fit */
            len = strlen(remainder);
            while ((len>1) && isspace(remainder[--len])) {remainder[len]=0;}
            len = strlen(remainder);
            TMPMEMREALLOC(remainder,remainder,len+1,char);
        } /* Master */
#ifdef PASO_MPI

        /* ship the remainder text to all ranks: length first, then content */
        len_i=(int) len;
        MPI_Bcast (&len_i, 1, MPI_INT, 0, mpi_info->comm);
        len=(size_t) len_i;
        if (mpi_info->rank != 0)
        {
            remainder = TMPMEMALLOC(len+1, char);
            remainder[0] = 0;
        }
        if (MPI_Bcast (remainder, len+1, MPI_CHAR, 0, mpi_info->comm)!=MPI_SUCCESS)
            Dudley_setError(PASO_MPI_ERROR, "Dudley_Mesh_read: broadcast of remainder failed");
#endif

        /* parse "<name> <key>" per line; advance ptr past each '\n'.
           NOTE(review): the sscanf return is unchecked, so a malformed line
           reuses the previous tag_key -- confirm acceptable. */
        if (remainder[0])
        {
            ptr = remainder;
            do
            {
                sscanf(ptr, "%s %d\n", name, &tag_key);
                if (*name) Dudley_Mesh_addTagMap(mesh_p,name,tag_key);
                ptr++;
            } while(NULL != (ptr = strchr(ptr, '\n')) && *ptr);
        }
        if (remainder)
            TMPMEMFREE(remainder);
    }

    /* close file */
    if (mpi_info->rank == 0) fclose(fileHandle_p);

    /* resolve id's : */
    /* rearrange elements: */
    if (Dudley_noError()) Dudley_Mesh_resolveNodeIds(mesh_p);
    if (Dudley_noError()) Dudley_Mesh_prepare(mesh_p, optimize);

    /* that's it */
    if (! Dudley_noError())
    {
        Dudley_Mesh_free(mesh_p);
    }
    /* free up memory */
    Paso_MPIInfo_free( mpi_info );
    return mesh_p;
}
625 caltinay 2752

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision

  ViewVC Help
Powered by ViewVC 1.1.26