Contents of /branches/doubleplusgood/finley/src/Mesh_read.cpp

Revision 1739
Fri Aug 29 06:19:53 2008 UTC by gross
Original Path: trunk/finley/src/Mesh_read.c
File MIME type: text/plain
File size: 34286 byte(s)
Fix in the MPI mesh reader: Owner needs to be set.



/* $Id$ */

/*******************************************************
 *
 * Copyright 2003-2007 by ACceSS MNRF
 * Copyright 2007 by University of Queensland
 *
 * http://esscc.uq.edu.au
 * Primary Business: Queensland, Australia
 * Licensed under the Open Software License version 3.0
 * http://www.opensource.org/licenses/osl-3.0.php
 *
 *******************************************************/

/**************************************************************/

/* Finley: read mesh */

/**************************************************************/

#include "Mesh.h"

/**************************************************************/

/* reads a mesh from a Finley file of name fname */

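/*
 * For orientation, the reader below expects a plain-text mesh file laid out
 * roughly as sketched here. The layout is inferred from the fscanf calls in
 * this function; the literal keywords shown ("2D-Nodes", "Rec4", "Tags") are
 * illustrative examples only, not a definitive format specification:
 *
 *   <mesh name>
 *   2D-Nodes <numNodes>
 *   <Id> <globalDOF> <Tag> <x> <y>        (one line per node, numDim coords)
 *   Rec4 <numEle>
 *   <Id> <Tag> <node ids ...>             (one line per element)
 *   <face element type> <count>           (face, contact and point element
 *   ...                                    blocks follow, with the same
 *                                          Id/Tag/node-list row layout)
 *   Tags
 *   <tag name> <tag key>                  (optional name/key pairs)
 */
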
Finley_Mesh* Finley_Mesh_read(char* fname,index_t order, index_t reduced_order, bool_t optimize)

{

  Paso_MPIInfo *mpi_info = Paso_MPIInfo_alloc( MPI_COMM_WORLD );
  dim_t numNodes, numDim, numEle, i0, i1;
  index_t tag_key;
  Finley_Mesh *mesh_p=NULL;
  char name[LenString_MAX],element_type[LenString_MAX],frm[20];
  char error_msg[LenErrorMsg_MAX];
  double time0=Finley_timer();
  FILE *fileHandle_p = NULL;
  ElementTypeId typeID, faceTypeID, contactTypeID, pointTypeID;

  Finley_resetError();

  if (mpi_info->size > 1) {
    Finley_setError(SYSTEM_ERROR,"Finley_Mesh_read: MPI is not supported yet.");
  } else {
    /* get file handle */
    fileHandle_p = fopen(fname, "r");
    if (fileHandle_p==NULL) {
      sprintf(error_msg,"Finley_Mesh_read: Opening file %s for reading failed.",fname);
      Finley_setError(IO_ERROR,error_msg);
      Paso_MPIInfo_free( mpi_info );
      return NULL;
    }

    /* read header */
    sprintf(frm,"%%%d[^\n]",LenString_MAX-1);
    fscanf(fileHandle_p, frm, name);

    /* get the nodes */

    fscanf(fileHandle_p, "%1d%*s %d\n", &numDim,&numNodes);
    /* allocate mesh */
    mesh_p = Finley_Mesh_alloc(name,numDim,order,reduced_order,mpi_info);
    if (Finley_noError()) {

      /* read nodes */
      Finley_NodeFile_allocTable(mesh_p->Nodes, numNodes);
      if (Finley_noError()) {
        if (1 == numDim) {
          for (i0 = 0; i0 < numNodes; i0++)
            fscanf(fileHandle_p, "%d %d %d %le\n", &mesh_p->Nodes->Id[i0],
                &mesh_p->Nodes->globalDegreesOfFreedom[i0], &mesh_p->Nodes->Tag[i0],
                &mesh_p->Nodes->Coordinates[INDEX2(0,i0,numDim)]);
        } else if (2 == numDim) {
          for (i0 = 0; i0 < numNodes; i0++)
            fscanf(fileHandle_p, "%d %d %d %le %le\n", &mesh_p->Nodes->Id[i0],
                &mesh_p->Nodes->globalDegreesOfFreedom[i0], &mesh_p->Nodes->Tag[i0],
                &mesh_p->Nodes->Coordinates[INDEX2(0,i0,numDim)],
                &mesh_p->Nodes->Coordinates[INDEX2(1,i0,numDim)]);
        } else if (3 == numDim) {
          for (i0 = 0; i0 < numNodes; i0++)
            fscanf(fileHandle_p, "%d %d %d %le %le %le\n", &mesh_p->Nodes->Id[i0],
                &mesh_p->Nodes->globalDegreesOfFreedom[i0], &mesh_p->Nodes->Tag[i0],
                &mesh_p->Nodes->Coordinates[INDEX2(0,i0,numDim)],
                &mesh_p->Nodes->Coordinates[INDEX2(1,i0,numDim)],
                &mesh_p->Nodes->Coordinates[INDEX2(2,i0,numDim)]);
        } /* if else else */
      }
      /* read elements */
      if (Finley_noError()) {

        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        typeID=Finley_RefElement_getTypeId(element_type);
        if (typeID==NoType) {
          sprintf(error_msg,"Finley_Mesh_read: Unidentified element type %s",element_type);
          Finley_setError(VALUE_ERROR,error_msg);
        } else {
          /* read the elements */
          mesh_p->Elements=Finley_ElementFile_alloc(typeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
          if (Finley_noError()) {
            Finley_ElementFile_allocTable(mesh_p->Elements, numEle);
            mesh_p->Elements->minColor=0;
            mesh_p->Elements->maxColor=numEle-1;
            if (Finley_noError()) {
              for (i0 = 0; i0 < numEle; i0++) {
                fscanf(fileHandle_p, "%d %d", &mesh_p->Elements->Id[i0], &mesh_p->Elements->Tag[i0]);
                mesh_p->Elements->Color[i0]=i0;
                mesh_p->Elements->Owner[i0]=0;
                for (i1 = 0; i1 < mesh_p->Elements->ReferenceElement->Type->numNodes; i1++) {
                  fscanf(fileHandle_p, " %d",
                      &mesh_p->Elements->Nodes[INDEX2(i1, i0, mesh_p->Elements->ReferenceElement->Type->numNodes)]);
                } /* for i1 */
                fscanf(fileHandle_p, "\n");
              } /* for i0 */
            }
          }
        }
      }
      /* get the face elements */
      if (Finley_noError()) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        faceTypeID=Finley_RefElement_getTypeId(element_type);
        if (faceTypeID==NoType) {
          sprintf(error_msg,"Finley_Mesh_read: Unidentified element type %s for face elements",element_type);
          Finley_setError(VALUE_ERROR,error_msg);
        } else {
          mesh_p->FaceElements=Finley_ElementFile_alloc(faceTypeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
          if (Finley_noError()) {
            Finley_ElementFile_allocTable(mesh_p->FaceElements, numEle);
            if (Finley_noError()) {
              mesh_p->FaceElements->minColor=0;
              mesh_p->FaceElements->maxColor=numEle-1;
              for (i0 = 0; i0 < numEle; i0++) {
                fscanf(fileHandle_p, "%d %d", &mesh_p->FaceElements->Id[i0], &mesh_p->FaceElements->Tag[i0]);
                mesh_p->FaceElements->Color[i0]=i0;
                mesh_p->FaceElements->Owner[i0]=0;
                for (i1 = 0; i1 < mesh_p->FaceElements->ReferenceElement->Type->numNodes; i1++) {
                  fscanf(fileHandle_p, " %d",
                      &mesh_p->FaceElements->Nodes[INDEX2(i1, i0, mesh_p->FaceElements->ReferenceElement->Type->numNodes)]);
                } /* for i1 */
                fscanf(fileHandle_p, "\n");
              } /* for i0 */
            }
          }
        }
      }
      /* get the Contact face element */
      if (Finley_noError()) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        contactTypeID=Finley_RefElement_getTypeId(element_type);
        if (contactTypeID==NoType) {
          sprintf(error_msg,"Finley_Mesh_read: Unidentified element type %s for contact elements",element_type);
          Finley_setError(VALUE_ERROR,error_msg);
        } else {
          mesh_p->ContactElements=Finley_ElementFile_alloc(contactTypeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
          if (Finley_noError()) {
            Finley_ElementFile_allocTable(mesh_p->ContactElements, numEle);
            if (Finley_noError()) {
              mesh_p->ContactElements->minColor=0;
              mesh_p->ContactElements->maxColor=numEle-1;
              for (i0 = 0; i0 < numEle; i0++) {
                fscanf(fileHandle_p, "%d %d", &mesh_p->ContactElements->Id[i0], &mesh_p->ContactElements->Tag[i0]);
                mesh_p->ContactElements->Color[i0]=i0;
                mesh_p->ContactElements->Owner[i0]=0;
                for (i1 = 0; i1 < mesh_p->ContactElements->ReferenceElement->Type->numNodes; i1++) {
                  fscanf(fileHandle_p, " %d",
                      &mesh_p->ContactElements->Nodes[INDEX2(i1, i0, mesh_p->ContactElements->ReferenceElement->Type->numNodes)]);
                } /* for i1 */
                fscanf(fileHandle_p, "\n");
              } /* for i0 */
            }
          }
        }
      }
      /* get the nodal element */
      if (Finley_noError()) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        pointTypeID=Finley_RefElement_getTypeId(element_type);
        if (pointTypeID==NoType) {
          sprintf(error_msg,"Finley_Mesh_read: Unidentified element type %s for points",element_type);
          Finley_setError(VALUE_ERROR,error_msg);
        }
        mesh_p->Points=Finley_ElementFile_alloc(pointTypeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
        if (Finley_noError()) {
          Finley_ElementFile_allocTable(mesh_p->Points, numEle);
          if (Finley_noError()) {
            mesh_p->Points->minColor=0;
            mesh_p->Points->maxColor=numEle-1;
            for (i0 = 0; i0 < numEle; i0++) {
              fscanf(fileHandle_p, "%d %d", &mesh_p->Points->Id[i0], &mesh_p->Points->Tag[i0]);
              mesh_p->Points->Color[i0]=i0;
              mesh_p->Points->Owner[i0]=0;
              for (i1 = 0; i1 < mesh_p->Points->ReferenceElement->Type->numNodes; i1++) {
                fscanf(fileHandle_p, " %d",
                    &mesh_p->Points->Nodes[INDEX2(i1, i0, mesh_p->Points->ReferenceElement->Type->numNodes)]);
              } /* for i1 */
              fscanf(fileHandle_p, "\n");
            } /* for i0 */
          }
        }
      }
      /* get the name tags */
      if (Finley_noError()) {
        if (feof(fileHandle_p) == 0) {
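          /* the first string on this line is the section keyword that
             introduces the tag map (presumably "Tags"); it is read into
             name and then overwritten by the name/key pairs below */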
          fscanf(fileHandle_p, "%s\n", name);
          while (feof(fileHandle_p) == 0) {
            fscanf(fileHandle_p, "%s %d\n", name, &tag_key);
            Finley_Mesh_addTagMap(mesh_p,name,tag_key);
          }
        }
      }
    }
    /* close file */
    fclose(fileHandle_p);

    /* resolve id's : */
    /* rearrange elements: */

    if (Finley_noError()) Finley_Mesh_resolveNodeIds(mesh_p);
    if (Finley_noError()) Finley_Mesh_prepare(mesh_p, optimize);

    /* that's it */
    #ifdef Finley_TRACE
    printf("timing: reading mesh: %.4e sec\n",Finley_timer()-time0);
    #endif
  }

  /* that's it */
  if (! Finley_noError()) {
    Finley_Mesh_free(mesh_p);
  }
  Paso_MPIInfo_free( mpi_info );
  return mesh_p;
}

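/*
 * Minimal usage sketch for the serial reader above (illustrative only; the
 * file name and the order/reduced_order/optimize argument values are
 * hypothetical, not taken from this file):
 *
 *   Finley_Mesh* mesh = Finley_Mesh_read("mesh.fly", 1, 1, FALSE);
 *   if (Finley_noError()) {
 *     ... use mesh ...
 *     Finley_Mesh_free(mesh);
 *   }
 *
 * Finley_Mesh_read_MPI below has the same signature but reads the file on
 * rank 0 and distributes its contents in chunks to the other ranks.
 */
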
Finley_Mesh* Finley_Mesh_read_MPI(char* fname,index_t order, index_t reduced_order, bool_t optimize)

{

  Paso_MPIInfo *mpi_info = Paso_MPIInfo_alloc( MPI_COMM_WORLD );
  dim_t numNodes, numDim, numEle, i0, i1;
  Finley_Mesh *mesh_p=NULL;
  char name[LenString_MAX],element_type[LenString_MAX],frm[20];
  char error_msg[LenErrorMsg_MAX];
  double time0=Finley_timer();
  FILE *fileHandle_p = NULL;
  ElementTypeId typeID, faceTypeID, contactTypeID, pointTypeID;
  index_t tag_key;

  Finley_resetError();

  if (mpi_info->rank == 0) {
    /* get file handle */
    fileHandle_p = fopen(fname, "r");
    if (fileHandle_p==NULL) {
      sprintf(error_msg,"Finley_Mesh_read: Opening file %s for reading failed.",fname);
      Finley_setError(IO_ERROR,error_msg);
      Paso_MPIInfo_free( mpi_info );
      return NULL;
    }

    /* read header */
    sprintf(frm,"%%%d[^\n]",LenString_MAX-1);
    fscanf(fileHandle_p, frm, name);

    /* get the number of nodes */
    fscanf(fileHandle_p, "%1d%*s %d\n", &numDim,&numNodes);
  }

  #ifdef PASO_MPI
  /* MPI Broadcast numDim, numNodes, name if there are multiple MPI procs */
  if (mpi_info->size > 1) {
    int temp1[3], error_code;
    temp1[0] = numDim;
    temp1[1] = numNodes;
    temp1[2] = strlen(name) + 1;
    error_code = MPI_Bcast (temp1, 3, MPI_INT, 0, mpi_info->comm);
    if (error_code != MPI_SUCCESS) {
      Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of temp1 failed");
      return NULL;
    }
    numDim = temp1[0];
    numNodes = temp1[1];
    error_code = MPI_Bcast (name, temp1[2], MPI_CHAR, 0, mpi_info->comm);
    if (error_code != MPI_SUCCESS) {
      Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of name failed");
      return NULL;
    }
  }
  #endif
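  /* from here on every rank holds the header values (numDim, numNodes, name);
     in a build without PASO_MPI the single rank read them directly above */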

  /* allocate mesh */
  mesh_p = Finley_Mesh_alloc(name,numDim,order,reduced_order,mpi_info);
  if (Finley_noError()) {
    /* Each CPU will get at most chunkSize nodes so the message has to be sufficiently large */
    int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0, chunkEle=0, nextCPU=1;
    int *tempInts = TMPMEMALLOC(chunkSize*3+1, index_t); /* Stores the integer message data */
    double *tempCoords = TMPMEMALLOC(chunkSize*numDim, double); /* Stores the double message data */

    /*
      Read chunkSize nodes at a time and send each chunk to a worker CPU,
      which copies it into its local mesh_p. It does not matter that a CPU
      may receive the wrong nodes for its elements; this is sorted out later.
      The first chunk is sent to CPU 1, the second to CPU 2, and so on; the
      last chunk stays on CPU 0 (the master). The three columns of integers
      (Id, gDOF, Tag) are gathered into the single array tempInts and sent
      together in one MPI message.
    */

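    /*
     * Layout of one node chunk, as packed below (derived from the indexing
     * in the code; the numbers in the example are made up for illustration):
     *
     *   tempInts   = [ Id[0..chunkSize-1] | gDOF[0..chunkSize-1] |
     *                  Tag[0..chunkSize-1] | chunkNodes ]
     *   tempCoords = [ x0 y0 (z0) x1 y1 (z1) ... ]   (numDim values per node)
     *
     * Example: numNodes=10 on 4 ranks gives chunkSize = 10/4 + 1 = 3, so
     * ranks 1, 2 and 3 each receive 3 nodes and the last, short chunk of
     * 1 node stays on rank 0.
     */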
    if (mpi_info->rank == 0) { /* Master */
      for (;;) { /* Infinite loop */
        for (i0=0; i0<chunkSize*3+1; i0++) tempInts[i0] = -1;
        for (i0=0; i0<chunkSize*numDim; i0++) tempCoords[i0] = -1.0;
        chunkNodes = 0;
        for (i1=0; i1<chunkSize; i1++) {
          if (totalNodes >= numNodes) break; /* End of inner loop */
          if (1 == numDim)
            fscanf(fileHandle_p, "%d %d %d %le\n",
                &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                &tempCoords[i1*numDim+0]);
          if (2 == numDim)
            fscanf(fileHandle_p, "%d %d %d %le %le\n",
                &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1]);
          if (3 == numDim)
            fscanf(fileHandle_p, "%d %d %d %le %le %le\n",
                &tempInts[0+i1], &tempInts[chunkSize+i1], &tempInts[chunkSize*2+i1],
                &tempCoords[i1*numDim+0], &tempCoords[i1*numDim+1], &tempCoords[i1*numDim+2]);
          totalNodes++; /* When do we quit the infinite loop? */
          chunkNodes++; /* How many nodes do we actually have in this chunk? It may be smaller than chunkSize. */
        }
        if (chunkNodes > chunkSize) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: error reading chunks of mesh, data too large for message size");
          return NULL;
        }
        #ifdef PASO_MPI
        /* Eventually we'll send chunkSize nodes to each CPU numbered 1 ... mpi_info->size-1, here goes one of them */
        if (nextCPU < mpi_info->size) {
          int mpi_error;
          tempInts[chunkSize*3] = chunkNodes; /* The message has one more int to send chunkNodes */
          mpi_error = MPI_Send(tempInts, chunkSize*3+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
          if ( mpi_error != MPI_SUCCESS ) {
            Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: send of tempInts failed");
            return NULL;
          }
          mpi_error = MPI_Send(tempCoords, chunkSize*numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
          if ( mpi_error != MPI_SUCCESS ) {
            Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: send of tempCoords failed");
            return NULL;
          }
        }
        #endif
        nextCPU++;
        /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
        if (nextCPU > mpi_info->size) break; /* End infinite loop */
      } /* Infinite loop */
    } /* End master */
    else { /* Worker */
      #ifdef PASO_MPI
      /* Each worker receives two messages */
      MPI_Status status;
      int mpi_error;
      mpi_error = MPI_Recv(tempInts, chunkSize*3+1, MPI_INT, 0, 81720, mpi_info->comm, &status);
      if ( mpi_error != MPI_SUCCESS ) {
        Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: receive of tempInts failed");
        return NULL;
      }
      mpi_error = MPI_Recv(tempCoords, chunkSize*numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
      if ( mpi_error != MPI_SUCCESS ) {
        Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: receive of tempCoords failed");
        return NULL;
      }
      chunkNodes = tempInts[chunkSize*3]; /* How many nodes are in this worker's chunk? */
      #endif
    } /* Worker */

    /* Copy node data from tempMem to mesh_p */
    Finley_NodeFile_allocTable(mesh_p->Nodes, chunkNodes);
    if (Finley_noError()) {
      for (i0=0; i0<chunkNodes; i0++) {
        mesh_p->Nodes->Id[i0] = tempInts[0+i0];
        mesh_p->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize+i0];
        mesh_p->Nodes->Tag[i0] = tempInts[chunkSize*2+i0];
        for (i1=0; i1<numDim; i1++) {
          mesh_p->Nodes->Coordinates[INDEX2(i1,i0,numDim)] = tempCoords[i0*numDim+i1];
        }
      }
    }

    TMPMEMFREE(tempInts);
    TMPMEMFREE(tempCoords);

    /* read elements */

    /* Read the element typeID */
    if (Finley_noError()) {
      if (mpi_info->rank == 0) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        typeID=Finley_RefElement_getTypeId(element_type);
      }
      #ifdef PASO_MPI
      if (mpi_info->size > 1) {
        int temp1[2], mpi_error;
        temp1[0] = (int) typeID;
        temp1[1] = numEle;
        mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
        if (mpi_error != MPI_SUCCESS) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of Element typeID failed");
          return NULL;
        }
        typeID = (ElementTypeId) temp1[0];
        numEle = temp1[1];
      }
      #endif
      if (typeID==NoType) {
        sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
        Finley_setError(VALUE_ERROR, error_msg);
      }
    }

    /* Allocate the ElementFile */
    mesh_p->Elements=Finley_ElementFile_alloc(typeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
    numNodes = mesh_p->Elements->ReferenceElement->Type->numNodes; /* New meaning for numNodes: num nodes per element */

    /* Read the element data */
    if (Finley_noError()) {
      int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1;
      int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
      /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
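      /*
       * Packing of one element chunk in tempInts, as used below (derived
       * from the indexing in the code):
       *
       *   [ Id_0 Tag_0 n_0,0 ... n_0,numNodes-1 | Id_1 Tag_1 ... | chunkEle ]
       *
       * i.e. 2+numNodes ints per element, with the actual element count of
       * the chunk appended as the final integer.
       */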
      if (mpi_info->rank == 0) { /* Master */
        for (;;) { /* Infinite loop */
          for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
          chunkEle = 0;
          for (i0=0; i0<chunkSize; i0++) {
            if (totalEle >= numEle) break; /* End inner loop */
            fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
            for (i1 = 0; i1 < numNodes; i1++) fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            fscanf(fileHandle_p, "\n");
            totalEle++;
            chunkEle++;
          }
          #ifdef PASO_MPI
          /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
          if (nextCPU < mpi_info->size) {
            int mpi_error;
            tempInts[chunkSize*(2+numNodes)] = chunkEle;
            mpi_error = MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81722, mpi_info->comm);
            if ( mpi_error != MPI_SUCCESS ) {
              Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: send of tempInts for Elements failed");
              return NULL;
            }
          }
          #endif
          nextCPU++;
          /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
          if (nextCPU > mpi_info->size) break; /* End infinite loop */
        } /* Infinite loop */
      } /* End master */
      else { /* Worker */
        #ifdef PASO_MPI
        /* Each worker receives one message */
        MPI_Status status;
        int mpi_error;
        mpi_error = MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81722, mpi_info->comm, &status);
        if ( mpi_error != MPI_SUCCESS ) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: receive of tempInts for Elements failed");
          return NULL;
        }
        chunkEle = tempInts[chunkSize*(2+numNodes)];
        #endif
      } /* Worker */

      /* Copy Element data from tempInts to mesh_p */
      Finley_ElementFile_allocTable(mesh_p->Elements, chunkEle);
      mesh_p->Elements->minColor=0;
      mesh_p->Elements->maxColor=chunkEle-1;
      if (Finley_noError()) {
        #pragma omp parallel for private (i0, i1)
        for (i0=0; i0<chunkEle; i0++) {
          mesh_p->Elements->Id[i0] = tempInts[i0*(2+numNodes)+0];
          mesh_p->Elements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
          mesh_p->Elements->Owner[i0] = mpi_info->rank;
          mesh_p->Elements->Color[i0] = i0;
          for (i1 = 0; i1 < numNodes; i1++) {
            mesh_p->Elements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
          }
        }
      }

      TMPMEMFREE(tempInts);
    } /* end of Read the element data */
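
    /*
     * The three blocks below repeat the same read-and-scatter pattern for
     * the face, contact and point (nodal) elements; they differ only in the
     * ElementFile being filled and in the MPI message tags used
     * (81723, 81724 and 81725 respectively).
     */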

    /* read face elements */

    /* Read the element typeID */
    if (Finley_noError()) {
      if (mpi_info->rank == 0) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        typeID=Finley_RefElement_getTypeId(element_type);
      }
      #ifdef PASO_MPI
      if (mpi_info->size > 1) {
        int temp1[2], mpi_error;
        temp1[0] = (int) typeID;
        temp1[1] = numEle;
        mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
        if (mpi_error != MPI_SUCCESS) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of Element typeID failed");
          return NULL;
        }
        typeID = (ElementTypeId) temp1[0];
        numEle = temp1[1];
      }
      #endif
      if (typeID==NoType) {
        sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
        Finley_setError(VALUE_ERROR, error_msg);
      }
    }

    /* Allocate the ElementFile */
    mesh_p->FaceElements=Finley_ElementFile_alloc(typeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
    numNodes = mesh_p->FaceElements->ReferenceElement->Type->numNodes; /* New meaning for numNodes: num nodes per element */

    /* Read the face element data */
    if (Finley_noError()) {
      int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1;
      int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
      /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
      if (mpi_info->rank == 0) { /* Master */
        for (;;) { /* Infinite loop */
          for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
          chunkEle = 0;
          for (i0=0; i0<chunkSize; i0++) {
            if (totalEle >= numEle) break; /* End inner loop */
            fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
            for (i1 = 0; i1 < numNodes; i1++) fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            fscanf(fileHandle_p, "\n");
            totalEle++;
            chunkEle++;
          }
          #ifdef PASO_MPI
          /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
          if (nextCPU < mpi_info->size) {
            int mpi_error;
            tempInts[chunkSize*(2+numNodes)] = chunkEle;
            mpi_error = MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81723, mpi_info->comm);
            if ( mpi_error != MPI_SUCCESS ) {
              Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: send of tempInts for FaceElements failed");
              return NULL;
            }
          }
          #endif
          nextCPU++;
          /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
          if (nextCPU > mpi_info->size) break; /* End infinite loop */
        } /* Infinite loop */
      } /* End master */
      else { /* Worker */
        #ifdef PASO_MPI
        /* Each worker receives one message */
        MPI_Status status;
        int mpi_error;
        mpi_error = MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81723, mpi_info->comm, &status);
        if ( mpi_error != MPI_SUCCESS ) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: receive of tempInts for FaceElements failed");
          return NULL;
        }
        chunkEle = tempInts[chunkSize*(2+numNodes)];
        #endif
      } /* Worker */

      /* Copy Element data from tempInts to mesh_p */
      Finley_ElementFile_allocTable(mesh_p->FaceElements, chunkEle);
      mesh_p->FaceElements->minColor=0;
      mesh_p->FaceElements->maxColor=chunkEle-1;
      if (Finley_noError()) {
        #pragma omp parallel for private (i0, i1)
        for (i0=0; i0<chunkEle; i0++) {
          mesh_p->FaceElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
          mesh_p->FaceElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
          mesh_p->FaceElements->Owner[i0] = mpi_info->rank;
          mesh_p->FaceElements->Color[i0] = i0;
          for (i1 = 0; i1 < numNodes; i1++) {
            mesh_p->FaceElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
          }
        }
      }

      TMPMEMFREE(tempInts);
    } /* end of Read the face element data */

    /* read contact elements */

    /* Read the element typeID */
    if (Finley_noError()) {
      if (mpi_info->rank == 0) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        typeID=Finley_RefElement_getTypeId(element_type);
      }
      #ifdef PASO_MPI
      if (mpi_info->size > 1) {
        int temp1[2], mpi_error;
        temp1[0] = (int) typeID;
        temp1[1] = numEle;
        mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
        if (mpi_error != MPI_SUCCESS) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of Element typeID failed");
          return NULL;
        }
        typeID = (ElementTypeId) temp1[0];
        numEle = temp1[1];
      }
      #endif
      if (typeID==NoType) {
        sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
        Finley_setError(VALUE_ERROR, error_msg);
      }
    }

    /* Allocate the ElementFile */
    mesh_p->ContactElements=Finley_ElementFile_alloc(typeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
    numNodes = mesh_p->ContactElements->ReferenceElement->Type->numNodes; /* New meaning for numNodes: num nodes per element */

    /* Read the contact element data */
    if (Finley_noError()) {
      int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1;
      int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
      /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
      if (mpi_info->rank == 0) { /* Master */
        for (;;) { /* Infinite loop */
          for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
          chunkEle = 0;
          for (i0=0; i0<chunkSize; i0++) {
            if (totalEle >= numEle) break; /* End inner loop */
            fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
            for (i1 = 0; i1 < numNodes; i1++) fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            fscanf(fileHandle_p, "\n");
            totalEle++;
            chunkEle++;
          }
          #ifdef PASO_MPI
          /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
          if (nextCPU < mpi_info->size) {
            int mpi_error;
            tempInts[chunkSize*(2+numNodes)] = chunkEle;
            mpi_error = MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81724, mpi_info->comm);
            if ( mpi_error != MPI_SUCCESS ) {
              Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: send of tempInts for ContactElements failed");
              return NULL;
            }
          }
          #endif
          nextCPU++;
          /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
          if (nextCPU > mpi_info->size) break; /* End infinite loop */
        } /* Infinite loop */
      } /* End master */
      else { /* Worker */
        #ifdef PASO_MPI
        /* Each worker receives one message */
        MPI_Status status;
        int mpi_error;
        mpi_error = MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81724, mpi_info->comm, &status);
        if ( mpi_error != MPI_SUCCESS ) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: receive of tempInts for ContactElements failed");
          return NULL;
        }
        chunkEle = tempInts[chunkSize*(2+numNodes)];
        #endif
      } /* Worker */

      /* Copy Element data from tempInts to mesh_p */
      Finley_ElementFile_allocTable(mesh_p->ContactElements, chunkEle);
      mesh_p->ContactElements->minColor=0;
      mesh_p->ContactElements->maxColor=chunkEle-1;
      if (Finley_noError()) {
        #pragma omp parallel for private (i0, i1)
        for (i0=0; i0<chunkEle; i0++) {
          mesh_p->ContactElements->Id[i0] = tempInts[i0*(2+numNodes)+0];
          mesh_p->ContactElements->Tag[i0] = tempInts[i0*(2+numNodes)+1];
          mesh_p->ContactElements->Owner[i0] = mpi_info->rank;
          mesh_p->ContactElements->Color[i0] = i0;
          for (i1 = 0; i1 < numNodes; i1++) {
            mesh_p->ContactElements->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
          }
        }
      }

      TMPMEMFREE(tempInts);
    } /* end of Read the contact element data */

    /* read nodal elements */

    /* Read the element typeID */
    if (Finley_noError()) {
      if (mpi_info->rank == 0) {
        fscanf(fileHandle_p, "%s %d\n", element_type, &numEle);
        typeID=Finley_RefElement_getTypeId(element_type);
      }
      #ifdef PASO_MPI
      if (mpi_info->size > 1) {
        int temp1[2], mpi_error;
        temp1[0] = (int) typeID;
        temp1[1] = numEle;
        mpi_error = MPI_Bcast (temp1, 2, MPI_INT, 0, mpi_info->comm);
        if (mpi_error != MPI_SUCCESS) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: broadcast of Element typeID failed");
          return NULL;
        }
        typeID = (ElementTypeId) temp1[0];
        numEle = temp1[1];
      }
      #endif
      if (typeID==NoType) {
        sprintf(error_msg, "Finley_Mesh_read: Unidentified element type %s", element_type);
        Finley_setError(VALUE_ERROR, error_msg);
      }
    }

    /* Allocate the ElementFile */
    mesh_p->Points=Finley_ElementFile_alloc(typeID,mesh_p->order, mesh_p->reduced_order, mpi_info);
    numNodes = mesh_p->Points->ReferenceElement->Type->numNodes; /* New meaning for numNodes: num nodes per element */

    /* Read the nodal element data */
    if (Finley_noError()) {
      int chunkSize = numEle / mpi_info->size + 1, totalEle=0, nextCPU=1;
      int *tempInts = TMPMEMALLOC(chunkSize*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
      /* Elements are specified as a list of integers...only need one message instead of two as with the nodes */
      if (mpi_info->rank == 0) { /* Master */
        for (;;) { /* Infinite loop */
          for (i0=0; i0<chunkSize*(2+numNodes)+1; i0++) tempInts[i0] = -1;
          chunkEle = 0;
          for (i0=0; i0<chunkSize; i0++) {
            if (totalEle >= numEle) break; /* End inner loop */
            fscanf(fileHandle_p, "%d %d", &tempInts[i0*(2+numNodes)+0], &tempInts[i0*(2+numNodes)+1]);
            for (i1 = 0; i1 < numNodes; i1++) fscanf(fileHandle_p, " %d", &tempInts[i0*(2+numNodes)+2+i1]);
            fscanf(fileHandle_p, "\n");
            totalEle++;
            chunkEle++;
          }
          #ifdef PASO_MPI
          /* Eventually we'll send chunk of elements to each CPU except 0 itself, here goes one of them */
          if (nextCPU < mpi_info->size) {
            int mpi_error;
            tempInts[chunkSize*(2+numNodes)] = chunkEle;
            mpi_error = MPI_Send(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, nextCPU, 81725, mpi_info->comm);
            if ( mpi_error != MPI_SUCCESS ) {
              Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: send of tempInts for Points failed");
              return NULL;
            }
          }
          #endif
          nextCPU++;
          /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
          if (nextCPU > mpi_info->size) break; /* End infinite loop */
        } /* Infinite loop */
      } /* End master */
      else { /* Worker */
        #ifdef PASO_MPI
        /* Each worker receives one message */
        MPI_Status status;
        int mpi_error;
        mpi_error = MPI_Recv(tempInts, chunkSize*(2+numNodes)+1, MPI_INT, 0, 81725, mpi_info->comm, &status);
        if ( mpi_error != MPI_SUCCESS ) {
          Finley_setError(PASO_MPI_ERROR, "Finley_Mesh_read: receive of tempInts for Points failed");
          return NULL;
        }
        chunkEle = tempInts[chunkSize*(2+numNodes)];
        #endif
      } /* Worker */

      /* Copy Element data from tempInts to mesh_p */
      Finley_ElementFile_allocTable(mesh_p->Points, chunkEle);
      mesh_p->Points->minColor=0;
      mesh_p->Points->maxColor=chunkEle-1;
      if (Finley_noError()) {
        #pragma omp parallel for private (i0, i1)
        for (i0=0; i0<chunkEle; i0++) {
          mesh_p->Points->Id[i0] = tempInts[i0*(2+numNodes)+0];
          mesh_p->Points->Tag[i0] = tempInts[i0*(2+numNodes)+1];
          mesh_p->Points->Owner[i0] = mpi_info->rank;
          mesh_p->Points->Color[i0] = i0;
          for (i1 = 0; i1 < numNodes; i1++) {
            mesh_p->Points->Nodes[INDEX2(i1, i0, numNodes)] = tempInts[i0*(2+numNodes)+2+i1];
          }
        }
      }

      TMPMEMFREE(tempInts);
    } /* end of Read the nodal element data */

    /* ksteube TODO: read tags */

  }

  /* close file */
  if (mpi_info->rank == 0) fclose(fileHandle_p);

  /* resolve id's : */
  /* rearrange elements: */

  /* return mesh_p; */ /* ksteube temp return for debugging */

  if (Finley_noError()) Finley_Mesh_resolveNodeIds(mesh_p);
  if (Finley_noError()) Finley_Mesh_prepare(mesh_p, optimize);

  /* that's it */
  #ifdef Finley_TRACE
  printf("timing: reading mesh: %.4e sec\n",Finley_timer()-time0);
  #endif

  /* that's it */
  if (! Finley_noError()) {
    Finley_Mesh_free(mesh_p);
  }
  Paso_MPIInfo_free( mpi_info );
  return mesh_p;
}

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision
