/[escript]/trunk/finley/src/Mesh_readGmsh.cpp
ViewVC logotype

Contents of /trunk/finley/src/Mesh_readGmsh.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 5207 - (show annotations)
Tue Oct 21 00:51:22 2014 UTC (4 years, 6 months ago) by jduplessis
File size: 31996 byte(s)
fixed compiler errors

1
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2014 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development 2012-2013 by School of Earth Sciences
13 * Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 *
15 *****************************************************************************/
16
17
18 /****************************************************************************
19
20 Finley: read mesh from gmsh file
21
22 *****************************************************************************/
23
24 #include "Mesh.h"
25 #include <cstdio>
26 #include "CPPAdapter/FinleyAdapterException.h"
27
28 // can't return directly because the flag needs to be shared across all nodes
29 #define FSCANF_CHECK(scan_ret) { if (scan_ret == EOF) { errorFlag = 1;} }
30 #define MAX_numNodes_gmsh 20
31
32 /*
33 error flags include:
34
35 0 - all ok
36 1 - early eof
37 2 - EOF before nodes section found
38 3 - EOF before elements section found
39 4 - throw error_msg
40 5 - eof at appropriate time.
41 6 - !noError
42 */
43
44 namespace finley {
45
46 int getElements(esysUtils::JMPI& mpi_info, Mesh * mesh_p, FILE * fileHandle_p, char * error_msg,
47 bool useMacroElements, const std::string fname, int numDim,double version, int order, int reduced_order) {
48 /* This function should read in the elements and distribute them to the apropriate process.
49 *
50 */
51 int errorFlag=0, scan_ret, i, j, e,gmsh_type, element_dim, partition_id, itmp, elementary_id, numTags=0;
52 ElementTypeId final_element_type = NoRef;
53 ElementTypeId final_face_element_type = NoRef;
54 ElementTypeId contact_element_type = NoRef;
55 int numElements=0, numFaceElements=0, totalNumElements=0, numNodesPerElement=0, numNodesPerElement2;
56 const_ReferenceElementSet_ptr refPoints, refContactElements, refFaceElements, refElements;
57 int *id, *tag, * vertices;
58 ElementTypeId * element_type;
59 if (mpi_info->rank == 0) {
60 scan_ret = fscanf(fileHandle_p, "%d", &totalNumElements);
61 FSCANF_CHECK(scan_ret);
62 }
63
64
65 #ifdef ESYS_MPI
66 // Broadcast numNodes if there are multiple mpi procs
67 if (mpi_info->size > 1) {
68 int msg[1];
69 if (mpi_info->rank == 0) {
70 msg[0] = totalNumElements;
71 } else {
72 msg[0] = 0;
73 }
74 MPI_Bcast(msg, 1, MPI_INT, 0, mpi_info->comm);
75 totalNumElements = msg[0];
76 }
77 #endif
78
79 int chunkSize = totalNumElements / mpi_info->size + 1, chunkElements=0, chunkFaceElements=0, count=0;
80 id = new int[chunkSize+1];
81 tag = new int[chunkSize+1];
82 vertices = new int[chunkSize*MAX_numNodes_gmsh];
83 element_type = new ElementTypeId[chunkSize+1];
84 std::vector<int> elementIndecies (chunkSize);
85 std::vector<int> faceElementIndecies (chunkSize);
86
87
88
89 #ifdef ESYS_MPI
90 int chunkInfo[2];//chunkInfo stores the number of element and number of face elements
91 #endif
92
93 #pragma omp parallel for private (i) schedule(static)
94 for (i=0; i<chunkSize*MAX_numNodes_gmsh; i++) vertices[i] = -1;
95
96 #pragma omp parallel for private (i) schedule(static)
97 for (i=0; i<chunkSize; i++) {
98 id[i] = -1;
99 tag[i] = -1;
100 element_type[i] = NoRef;
101 elementIndecies[i]=-1;
102 faceElementIndecies[i]=-1;
103 }
104
105 if (mpi_info->rank == 0) {
106 /* read all in */
107 #ifdef ESYS_MPI
108 int cpuId = 0;
109 #endif
110 for(e = 0; e < totalNumElements; e++) {
111 count = (chunkElements + chunkFaceElements);
112 scan_ret = fscanf(fileHandle_p, "%d %d", &id[count], &gmsh_type);
113 FSCANF_CHECK(scan_ret);
114 switch (gmsh_type) {
115 case 1: /* line order 1 */
116 element_type[count]=Line2;
117 element_dim=1;
118 numNodesPerElement=2;
119 break;
120 case 2: /* triangle order 1 */
121 element_type[count]=Tri3;
122 numNodesPerElement= 3;
123 element_dim=2;
124 break;
125 case 3: /* quadrilateral order 1 */
126 element_type[count]=Rec4;
127 numNodesPerElement= 4;
128 element_dim=2;
129 break;
130 case 4: /* tetrahedron order 1 */
131 element_type[count]=Tet4;
132 numNodesPerElement= 4;
133 element_dim=3;
134 break;
135 case 5: /* hexahedron order 1 */
136 element_type[count]=Hex8;
137 numNodesPerElement= 8;
138 element_dim=3;
139 break;
140 case 8: /* line order 2 */
141 if (useMacroElements) {
142 element_type[count]=Line3Macro;
143 } else {
144 element_type[count]=Line3;
145 }
146 numNodesPerElement= 3;
147 element_dim=1;
148 break;
149 case 9: /* triangle order 2 */
150 if (useMacroElements) {
151 element_type[count]=Tri6Macro;
152 } else {
153 element_type[count]=Tri6;
154 }
155 numNodesPerElement= 6;
156 element_dim=2;
157 break;
158 case 10: /* quadrilateral order 2 */
159 if (useMacroElements) {
160 element_type[count]=Rec9Macro;
161 } else {
162 element_type[count]=Rec9;
163 }
164 numNodesPerElement= 9;
165 element_dim=2;
166 break;
167 case 11: /* tetrahedron order 2 */
168 if (useMacroElements) {
169 element_type[count]=Tet10Macro;
170 } else {
171 element_type[count]=Tet10;
172 }
173 numNodesPerElement= 10;
174 element_dim=3;
175 break;
176 case 16: /* rectangular order 2 */
177 element_type[count]=Rec8;
178 numNodesPerElement= 8;
179 element_dim=2;
180 break;
181 case 17: /* hexahedron order 2 */
182 element_type[count]=Hex20;
183 numNodesPerElement= 20;
184 element_dim=3;
185 break;
186 case 15 : /* point */
187 element_type[count]=Point1;
188 numNodesPerElement= 1;
189 element_dim=0;
190 break;
191 default:
192 element_type[count]=NoRef;
193 sprintf(error_msg,"Unexpected gmsh element type %d in mesh file %s.", gmsh_type, fname.c_str());
194 errorFlag = 4;
195 }
196 if (element_dim == numDim) {
197 if (final_element_type == NoRef) {
198 final_element_type = element_type[count];
199 } else if (final_element_type != element_type[count]) {
200 sprintf(error_msg,"Finley can handle a single type of internal elements only.");
201 errorFlag = 4;
202
203 }
204 elementIndecies[chunkElements]=count;
205 numElements++;
206 chunkElements++;
207 } else if (element_dim == numDim-1) {
208 if (final_face_element_type == NoRef) {
209 final_face_element_type = element_type[count];
210 } else if (final_face_element_type != element_type[count]) {
211 sprintf(error_msg,"Finley can handle a single type of face elements only.");
212 errorFlag = 4;
213 }
214 faceElementIndecies[chunkFaceElements]=count;
215 numFaceElements++;
216 chunkFaceElements++;
217 }
218 if(version <= 1.0){
219 scan_ret = fscanf(fileHandle_p, "%d %d %d", &tag[count], &elementary_id, &numNodesPerElement2);
220 FSCANF_CHECK(scan_ret);
221 partition_id = 1;
222 if (numNodesPerElement2 != numNodesPerElement) {
223 sprintf(error_msg,"Illegal number of nodes for element %d in mesh file %s.", id[count], fname.c_str());
224 errorFlag = 4;
225 }
226 } else {
227 scan_ret = fscanf(fileHandle_p, "%d", &numTags);
228 FSCANF_CHECK(scan_ret);
229 elementary_id = tag[count] = partition_id = 1;
230 numNodesPerElement2=-1;
231 for(j = 0; j < numTags; j++){
232 scan_ret = fscanf(fileHandle_p, "%d", &itmp);
233 FSCANF_CHECK(scan_ret);
234 if (j == 0) {
235 tag[count] = itmp;
236 } else if (j == 1) {
237 elementary_id = itmp;
238 } else if (j == 2) {
239 partition_id = itmp;
240 }
241 /* ignore any other tags */
242 }
243 }
244
245 if (!noError()) {
246 errorFlag = 6;
247 }
248 // fprintf(stderr,"num elements=%d, chunkElements = %d\n",(numElements+numFaceElements),(chunkFaceElements+chunkElements));
249 for(j = 0; j < numNodesPerElement; j++) {
250 scan_ret = fscanf(fileHandle_p, "%d", &vertices[INDEX2(j,count,MAX_numNodes_gmsh)]);
251 FSCANF_CHECK(scan_ret);
252 }
253 /* for tet10 the last two nodes need to be swapped */
254 if ((element_type[count]==Tet10) || (element_type[count]==Tet10Macro)) {
255 itmp=vertices[INDEX2(9,count,MAX_numNodes_gmsh)];
256 vertices[INDEX2(9,count,MAX_numNodes_gmsh)]=vertices[INDEX2(8,count,MAX_numNodes_gmsh)];
257 vertices[INDEX2(8,count,MAX_numNodes_gmsh)]=itmp;
258 }
259 #ifdef ESYS_MPI
260 if(chunkElements+chunkFaceElements==chunkSize) {
261 cpuId++;
262 chunkInfo[0]=chunkElements;
263 chunkInfo[1]=chunkFaceElements;
264
265 if(cpuId < mpi_info->size) {
266 if(!errorFlag){
267 MPI_Send(&errorFlag, 1, MPI_INT, cpuId, 81719, mpi_info->comm);
268 MPI_Send(vertices, chunkSize*MAX_numNodes_gmsh, MPI_INT, cpuId, 81720, mpi_info->comm);
269 MPI_Send(id, chunkSize, MPI_INT, cpuId, 81721, mpi_info->comm);
270 MPI_Send(tag, chunkSize, MPI_INT, cpuId, 81722, mpi_info->comm);
271 MPI_Send(element_type, chunkSize, MPI_INT, cpuId, 81723, mpi_info->comm);
272 MPI_Send(chunkInfo, 2, MPI_INT, cpuId, 81724, mpi_info->comm);
273 MPI_Send(&(elementIndecies[0]), chunkElements, MPI_INT, cpuId, 81725, mpi_info->comm);
274 MPI_Send(&(faceElementIndecies[0]), chunkFaceElements, MPI_INT, cpuId, 81726, mpi_info->comm);
275 } else{
276 //let the remaining processes know something has gone wrong
277 for(; cpuId<mpi_info->size; cpuId++) {
278 MPI_Send(&errorFlag, 1, MPI_INT, cpuId, 81719, mpi_info->comm);
279 }
280 break;
281 }
282
283 // reset arrays for next cpu
284 // for i in vertices vertices[i]=-1 etc also use openMp
285 #pragma omp parallel for private (i) schedule(static)
286 for (i=0; i<chunkSize*MAX_numNodes_gmsh; i++) vertices[i] = -1;
287 #pragma omp parallel for private (i) schedule(static)
288 for (i=0; i<chunkSize; i++) {
289 id[i] = -1;
290 tag[i] = -1;
291 element_type[i] = NoRef;
292 }
293 chunkElements=0;
294 chunkFaceElements=0;
295 }
296 }
297 #endif
298
299 //end elment reading for loop
300 }
301 } else {
302 #ifdef ESYS_MPI
303 /* Each worker receives messages */
304 if(mpi_info->size>1){
305 MPI_Status status;
306
307 MPI_Recv(&errorFlag, 1, MPI_INT,0, 81719, mpi_info->comm, &status);
308 if(!errorFlag){
309 MPI_Recv(vertices, chunkSize*MAX_numNodes_gmsh, MPI_INT, 0, 81720, mpi_info->comm, &status);
310 MPI_Recv(id, chunkSize, MPI_INT, 0, 81721, mpi_info->comm, &status);
311 MPI_Recv(tag, chunkSize, MPI_INT, 0, 81722, mpi_info->comm, &status);
312 MPI_Recv(element_type, chunkSize, MPI_INT, 0, 81723, mpi_info->comm, &status);
313 MPI_Recv(chunkInfo, 2, MPI_INT, 0, 81724, mpi_info->comm, &status);
314 chunkElements = chunkInfo[0];
315 chunkFaceElements = chunkInfo[1];
316 MPI_Recv(&(elementIndecies[0]), chunkElements, MPI_INT, 0, 81725, mpi_info->comm,&status);
317 MPI_Recv(&(faceElementIndecies[0]), chunkFaceElements, MPI_INT, 0, 81726, mpi_info->comm,&status);
318 } else {
319 return errorFlag;
320 }
321 }
322 #endif
323
324 }
325
326 // fprintf(stderr,"in elements rank=,%d\n",mpi_info->rank);
327
328 #ifdef ESYS_MPI
329 if(mpi_info->size>1){
330 MPI_Bcast(&errorFlag, 1, MPI_INT, 0, mpi_info->comm);
331 }
332 #endif
333 if(errorFlag){
334 return errorFlag;
335 }
336
337 // all elements have been read and shared, now we have to identify the
338 // elements for finley
339 if (!noError()) {
340 return 6;
341 }
342 if (mpi_info->rank == 0) {
343 /* first we have to identify the elements to define Elements and FaceElements */
344 if (final_element_type == NoRef) {
345 if (numDim==1) {
346 final_element_type=Line2;
347 } else if (numDim==2) {
348 final_element_type=Tri3;
349 } else if (numDim==3) {
350 final_element_type=Tet4;
351 }
352 }
353 if (final_face_element_type == NoRef) {
354 if (numDim==1) {
355 final_face_element_type=Point1;
356 } else if (numDim==2) {
357 final_face_element_type=Line2;
358 } else if (numDim==3) {
359 final_face_element_type=Tri3;
360 }
361 }
362 if (final_face_element_type == Line2) {
363 contact_element_type=Line2_Contact;
364 } else if ( (final_face_element_type == Line3) || (final_face_element_type == Line3Macro) ) {
365 contact_element_type=Line3_Contact;
366 } else if (final_face_element_type == Tri3) {
367 contact_element_type=Tri3_Contact;
368 } else if ( (final_face_element_type == Tri6) || (final_face_element_type == Tri6Macro)) {
369 contact_element_type=Tri6_Contact;
370 } else {
371 contact_element_type=Point1_Contact;
372 }
373 }
374
375 #ifdef ESYS_MPI
376 // Broadcast numNodes if there are multiple mpi procs
377 if (mpi_info->size > 1) {
378 int msg[3];
379 if (mpi_info->rank == 0) {
380 msg[0] = final_element_type;
381 msg[1] = final_face_element_type;
382 msg[2] = contact_element_type;
383 } else {
384 msg[0] = 0;
385 msg[1] = 0;
386 msg[2] = 0;
387 }
388 MPI_Bcast(msg, 3, MPI_INT, 0, mpi_info->comm);
389 final_element_type = static_cast<ElementTypeId>(msg[0]);
390 final_face_element_type = static_cast<ElementTypeId>(msg[1]);
391 contact_element_type = static_cast<ElementTypeId>(msg[2]);
392 }
393 #endif
394
395
396
397 refElements.reset(new ReferenceElementSet(final_element_type, order, reduced_order));
398 refFaceElements.reset(new ReferenceElementSet(final_face_element_type, order, reduced_order));
399 refContactElements.reset(new ReferenceElementSet(contact_element_type, order, reduced_order));
400 refPoints.reset(new ReferenceElementSet(Point1, order, reduced_order));
401 mesh_p->Elements=new ElementFile(refElements, mpi_info);
402 mesh_p->FaceElements=new ElementFile(refFaceElements, mpi_info);
403 mesh_p->ContactElements=new ElementFile(refContactElements, mpi_info);
404 mesh_p->Points=new ElementFile(refPoints, mpi_info);
405
406
407 if (noError()) {
408 mesh_p->Elements->allocTable(chunkElements);
409 mesh_p->FaceElements->allocTable(chunkFaceElements);
410 mesh_p->ContactElements->allocTable(0);
411 mesh_p->Points->allocTable(0);
412 if (noError()) {
413 mesh_p->Elements->minColor=0;
414 mesh_p->Elements->maxColor=chunkElements-1;
415 mesh_p->FaceElements->minColor=0;
416 mesh_p->FaceElements->maxColor=chunkFaceElements-1;
417 mesh_p->ContactElements->minColor=0;
418 mesh_p->ContactElements->maxColor=0;
419 mesh_p->Points->minColor=0;
420 mesh_p->Points->maxColor=0;
421
422
423 chunkElements=0;
424 chunkFaceElements=0;
425 for(e = 0; e < chunkSize; e++) {
426 if (element_type[e] == final_element_type) {
427 mesh_p->Elements->Id[chunkElements]=id[e];
428 mesh_p->Elements->Tag[chunkElements]=tag[e];
429 mesh_p->Elements->Color[chunkElements]=e;
430 mesh_p->Elements->Owner[chunkElements]=mpi_info->rank;
431 // fprintf(stderr,"element id%d: ",mesh_p->Elements->Id[chunkElements]);
432 for (j = 0; j< mesh_p->Elements->numNodes; ++j) {
433 mesh_p->Elements->Nodes[INDEX2(j, chunkElements, mesh_p->Elements->numNodes)]=vertices[INDEX2(j,e,MAX_numNodes_gmsh)];
434 // fprintf(stderr,"nodes[%d]=%d ",INDEX2(j, chunkElements, mesh_p->Elements->numNodes),vertices[INDEX2(j,e,MAX_numNodes_gmsh)]);
435 }
436 // fprintf(stderr,"\n");
437 chunkElements++;
438 } else if (element_type[e] == final_face_element_type) {
439 mesh_p->FaceElements->Id[chunkFaceElements]=id[e];
440 mesh_p->FaceElements->Tag[chunkFaceElements]=tag[e];
441 mesh_p->FaceElements->Color[chunkFaceElements]=chunkFaceElements;
442 mesh_p->FaceElements->Owner[chunkFaceElements]=mpi_info->rank;
443 for (j=0; j<mesh_p->FaceElements->numNodes; ++j) {
444 mesh_p->FaceElements->Nodes[INDEX2(j, chunkFaceElements, mesh_p->FaceElements->numNodes)]=vertices[INDEX2(j,e,MAX_numNodes_gmsh)];
445 }
446 chunkFaceElements++;
447 }
448 }
449 } else {
450 return 6;
451 }
452
453 } else {
454 return 6;
455 }
456 // fprintf(stderr,"in elements rank=,%d\n",mpi_info->rank);
457
458 /* and clean up */
459 delete[] id;
460 delete[] tag;
461 delete[] element_type;
462 delete[] vertices;
463 return errorFlag;
464 }
465
466
467 int getNodes(esysUtils::JMPI& mpi_info, Mesh * mesh_p, FILE * fileHandle_p, int numDim, char * error_msg) {
468 int i, j, scan_ret, numNodes=0, errorFlag=0;
469 double rtmp0, rtmp1;
470
471 if (mpi_info->rank == 0) { /* Master */
472 scan_ret = fscanf(fileHandle_p, "%d", &numNodes);
473 FSCANF_CHECK(scan_ret);
474 }
475 #ifdef ESYS_MPI
476 // Broadcast numNodes if there are multiple mpi procs
477 if (mpi_info->size > 1) {
478 int msg[1];
479 if (mpi_info->rank == 0) {
480 msg[0] = numNodes;
481 } else {
482 msg[0] = 0;
483 }
484 MPI_Bcast(msg, 1, MPI_INT, 0, mpi_info->comm);
485 numNodes = msg[0];
486 }
487 #endif
488 int chunkSize = (numNodes / mpi_info->size) + 1, totalNodes=0, chunkNodes=0, nextCPU=1;
489 int *tempInts = new int[chunkSize+1]; /* Stores the integer message data */
490 double *tempCoords = new double[chunkSize*numDim]; /* Stores the double message data */
491 if (mpi_info->rank == 0) { /* Master */
492 while(1){
493 #pragma omp parallel for private (i) schedule(static)
494 for (i=0; i<chunkSize+1; i++) tempInts[i] = -1;
495 #pragma omp parallel for private (i) schedule(static)
496 for (i=0; i<chunkSize*numDim; i++) tempCoords[i] = -1.0;
497
498 if(!errorFlag){
499 chunkNodes=0;
500 for(i=0;i<chunkSize;i++){
501 if(totalNodes>=numNodes) break;//sprintf(error_msg, "more "); errorFlag=4;
502 if (1 == numDim) {
503 scan_ret = fscanf(fileHandle_p, "%d %le %le %le\n", &tempInts[i], &tempCoords[0+i*numDim], &rtmp0, &rtmp1);
504 FSCANF_CHECK(scan_ret);
505 } else if (2 == numDim) {
506 scan_ret = fscanf(fileHandle_p, "%d %le %le %le\n", &tempInts[i], &tempCoords[0+i*numDim], &tempCoords[1+i*numDim], &rtmp1);
507 FSCANF_CHECK(scan_ret);
508 } else if (3 == numDim) {
509 scan_ret = fscanf(fileHandle_p, "%d %le %le %le\n", &tempInts[i], &tempCoords[0+i*numDim], &tempCoords[1+i*numDim], &tempCoords[2+i*numDim]);
510 FSCANF_CHECK(scan_ret);
511 }
512 totalNodes++; /* When do we quit the infinite loop? */
513 chunkNodes++; /* How many nodes do we actually have in this chunk? It may be smaller than chunkSize. */
514 }
515 }
516 #ifdef ESYS_MPI
517 if(mpi_info->size>1 && (nextCPU < mpi_info->size)) {
518 /* Eventually we'll send chunkSize nodes to each CPU numbered 1 ... mpi_info->size-1, here goes one of them */
519 MPI_Send(&errorFlag, 1, MPI_INT, nextCPU, 81719, mpi_info->comm);
520 if(!errorFlag){
521 tempInts[chunkSize] = chunkNodes; /* The message has one more int to send chunkNodes */
522 MPI_Send(tempInts, chunkSize+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
523 MPI_Send(tempCoords, chunkSize*numDim, MPI_DOUBLE, nextCPU, 81721, mpi_info->comm);
524 }
525 }
526 #endif
527 nextCPU++;
528 /* Infinite loop ends when I've read a chunk for each of the worker nodes plus one more chunk for the master */
529 if (nextCPU > mpi_info->size) break; /* End infinite loop */
530 }
531 } else {
532 #ifdef ESYS_MPI
533 /* Each worker receives two messages */
534 MPI_Status status;
535 MPI_Recv(&errorFlag, 1, MPI_INT,0, 81719, mpi_info->comm, &status);
536 if(!errorFlag){
537 MPI_Recv(tempInts, chunkSize+1, MPI_INT, 0, 81720, mpi_info->comm, &status);
538 MPI_Recv(tempCoords, chunkSize*numDim, MPI_DOUBLE, 0, 81721, mpi_info->comm, &status);
539 chunkNodes = tempInts[chunkSize]; /* How many nodes are in this workers chunk? */
540 }
541 #endif
542 }
543
544 // fprintf(stderr, "chunkNodes=%d on rank %d\n",chunkNodes, mpi_info->rank);
545 // throw FinleyAdapterException("die");
546
547 #ifdef ESYS_MPI
548 if(mpi_info->size>1){
549 MPI_Bcast(&errorFlag, 1, MPI_INT, 0, mpi_info->comm);
550 }
551 #endif
552 if(errorFlag){
553 return errorFlag;
554 }
555
556 if (!noError()) return 6;
557 mesh_p->Nodes->allocTable(chunkNodes);
558 if (!noError()) return 6;
559
560 #pragma omp parallel for private (i, j) schedule(static)
561 for (i=0; i<chunkNodes; i++) {
562 mesh_p->Nodes->Id[i] = tempInts[i];
563 mesh_p->Nodes->globalDegreesOfFreedom[i] = tempInts[i];
564 mesh_p->Nodes->Tag[i]=0;
565 // fprintf(stderr,"node id%d %d: ",mesh_p->Nodes->Id[i], mesh_p->Nodes->globalDegreesOfFreedom[i]);
566
567 // mesh_p->Nodes->Tag[i] = tempInts[chunkSize*2+i];
568 for (j=0; j<numDim; j++) {
569 mesh_p->Nodes->Coordinates[INDEX2(j,i,numDim)] = tempCoords[i*numDim+j];
570 // fprintf(stderr, "%g ", tempCoords[i*numDim+j]);
571 }
572 // fprintf(stderr, "\n");
573
574 }
575
576 delete[] tempInts;
577 delete[] tempCoords;
578 return errorFlag;
579 }
580
581
582
583
584 Mesh* Mesh::readGmsh(esysUtils::JMPI& mpi_info, const std::string fname, int numDim, int order,
585 int reduced_order, bool optimize, bool useMacroElements)
586 {
587 double version = 1.0;
588 bool nodesRead=false, elementsRead=false;
589 int format = 0, size = sizeof(double), scan_ret, errorFlag=0, logicFlag=0;
590 int numNames=0;
591 int i, tag_info[2], itmp;
592 char line[LenString_MAX+1], name[LenString_MAX+1];
593 char error_msg[LenErrorMsg_MAX];
594
595 #ifdef Finley_TRACE
596 double time0=timer();
597 #endif
598 FILE * fileHandle_p = NULL;
599
600 resetError();
601 // allocate mesh
602 Mesh* mesh_p = new Mesh(fname, numDim, mpi_info);
603
604 // get file handle
605 if (mpi_info->rank == 0) {
606 fileHandle_p = fopen(fname.c_str(), "r");
607 if (fileHandle_p==NULL) {
608 sprintf(error_msg, "Opening Gmsh file %s for reading failed.", fname.c_str());
609 throw FinleyAdapterException(error_msg);
610 }
611 }
612 /* start reading */
613 while(noError() && errorFlag==0) {
614 /* find line staring with $ */
615 if (mpi_info->rank == 0) {
616 do {
617 if(fgets(line,LenString_MAX,fileHandle_p) == NULL) {
618 //check to see we atleast have some nodes and elements, if we do end the outer loop otherwise throw early eof.
619 if (!nodesRead){
620 //EOF before nodes section found
621 errorFlag = 2;
622 } else if(!elementsRead){
623 //EOF before elements section found
624 errorFlag = 3;
625 } else{
626 errorFlag=5;
627 }
628 }
629 } while(line[0] != '$');
630 }
631
632
633 if (mpi_info->rank == 0 ) {
634 if (!strncmp(&line[1], "MeshFormat", 10)) {
635 logicFlag = 1;
636 }
637
638 else if ( !strncmp(&line[1], "NOD", 3) ||
639 !strncmp(&line[1], "NOE", 3) ||
640 !strncmp(&line[1], "Nodes", 5) ) {
641 logicFlag = 2;
642 }
643 else if(!strncmp(&line[1], "ELM", 3) ||
644 !strncmp(&line[1], "Elements", 8) ) {
645 logicFlag = 3;
646 }
647 else if (!strncmp(&line[1], "PhysicalNames", 13)) {
648 logicFlag = 4;
649 }
650 }
651 #ifdef ESYS_MPI
652 int flags[2];
653 // Broadcast line
654 if (mpi_info->size > 1) {
655 if (mpi_info -> rank==0) {
656 flags[0] = errorFlag;
657 flags[1] = logicFlag;
658
659 } else {
660 flags[0] = 0;
661 flags[1] = 0;
662 }
663 MPI_Bcast(&flags, 2, MPI_INT, 0, mpi_info->comm);
664 errorFlag = flags[0];
665 logicFlag = flags[1];
666 }
667 //fprintf(stderr,"broadcasted\n");
668 #endif
669 // MPI_Barrier(mpi_info->comm);
670 //fprintf(stderr,"logic flag:%d on rank %d at line %d\n", logicFlag,mpi_info->rank,__LINE__);
671 /* format */
672 if (mpi_info->rank==0 && logicFlag ==1) {
673 scan_ret = fscanf(fileHandle_p, "%lf %d %d\n", &version, &format, &size);
674 FSCANF_CHECK(scan_ret);
675 //fprintf(stderr,"mesh format errorFlag:%d on rank %d \n",errorFlag,mpi_info->rank);
676 }
677 /* nodes are read */
678 else if (logicFlag == 2) {
679
680 nodesRead=true;
681 errorFlag = getNodes(mpi_info, mesh_p, fileHandle_p, numDim,error_msg);
682 //fprintf(stderr,"nodes errorFlag:%d on rank %d \n",errorFlag,mpi_info->rank);
683 logicFlag=0;
684 }
685
686 /* elements */
687 else if(logicFlag==3) {
688 elementsRead=true;
689 errorFlag=getElements(mpi_info, mesh_p, fileHandle_p, error_msg, useMacroElements, fname, numDim, version, order, reduced_order);
690 //fprintf(stderr,"elements errorFlag:%d on rank %d \n",errorFlag,mpi_info->rank);
691 logicFlag=0;
692 }
693 /* name tags (thanks to Antoine Lefebvre, antoine.lefebvre2@mail.mcgill.ca ) */
694 else if (logicFlag==4) {
695
696 if (! noError()) errorFlag=6;
697 if(mpi_info->rank == 0) {
698 scan_ret = fscanf(fileHandle_p, "%d", &numNames);
699 FSCANF_CHECK(scan_ret);
700 }
701 #ifdef ESYS_MPI
702 // Broadcast numNodes if there are multiple mpi procs
703 if (mpi_info->size > 1) {
704 MPI_Bcast(&numNames, 1, MPI_INT, 0, mpi_info->comm);
705 }
706 #endif
707
708 for (i = 0; i < numNames; i++) {
709 if(mpi_info->rank == 0) {
710 scan_ret = fscanf(fileHandle_p, "%d %d %s\n", &itmp, &(tag_info[0]), name);
711 FSCANF_CHECK(scan_ret);
712 //if (! (itmp == 2)) setError(IO_ERROR,"Mesh_readGmsh: expecting two entries per physical name.");
713 if ( strlen(name) < 3 ) setError(IO_ERROR,"Mesh_readGmsh: illegal tagname (\" missing?)");
714 if (! noError()) errorFlag=6 ;
715 name[strlen(name)-1]='\0';
716 }
717 //mpi broadcast the tag info
718
719 #ifdef ESYS_MPI
720 if (mpi_info->size > 1) {
721 if(mpi_info->rank==0){
722 tag_info[1]=strlen(name)+1;
723 } else{
724 tag_info[0]=0;
725 tag_info[1]=0;
726 }
727
728 MPI_Bcast(tag_info, 2, MPI_INT, 0, mpi_info->comm);
729 MPI_Bcast(&name, tag_info[1], MPI_CHAR, 0, mpi_info->comm); //strlen + 1 for null terminator
730 }
731 #endif
732 mesh_p->addTagMap(&name[1], tag_info[0]);
733 //fprintf(stderr,"elements errorFlag:%d on rank %d \n",errorFlag,mpi_info->rank);
734
735 }
736 //fprintf(stderr,"physical errorFlag:%d on rank %d \n",errorFlag,mpi_info->rank);
737 logicFlag=0;
738 }
739
740 //read closing tag $EndElements
741 if(mpi_info->rank==0 && errorFlag!=5) {
742 do {
743 if(fgets(line,LenString_MAX,fileHandle_p) == NULL) {
744 errorFlag = 1;
745 break;
746 }
747 } while( line[0]!='$' );
748 }
749
750 //handle errors
751 if(errorFlag){
752 switch(errorFlag) {
753 case 1: //early eof while scanning
754 throw FinleyAdapterException("early eof while scanning");
755 break;
756 case 2: //EOF before nodes section found
757 throw FinleyAdapterException("EOF before nodes section found");
758 break;
759 case 3:
760 throw FinleyAdapterException("EOF before elements section found");
761 break;
762 case 4: // throw error_msg
763 throw FinleyAdapterException(error_msg);
764 break;
765 case 5: // eof at apropriate time.
766 if(mpi_info->rank == 0) {
767 fclose(fileHandle_p);
768 }
769 break;
770 case 6:
771 fprintf(stderr,"returning NULL\n");
772 throw FinleyAdapterException("not noError");
773 return NULL;
774 default:
775 throw FinleyAdapterException("an unknown error has occured in readGmsh");
776 }
777 if(errorFlag == 5) {
778 break;
779 }
780 }
781
782
783 //end while loop
784 }
785
786 // clean up
787 if (!noError()) {
788 delete mesh_p;
789 return NULL;
790 }
791 // resolve id's
792 if (noError()) mesh_p->resolveNodeIds();
793 // rearrange elements
794 if (noError()) mesh_p->prepare(optimize);
795
796 if (!noError()) {
797 delete mesh_p;
798 return NULL;
799 }
800 return mesh_p;
801 }
802
803 } // namespace finley
804

Properties

Name Value
svn:mergeinfo /branches/diaplayground/finley/src/Mesh_readGmsh.cpp:4940-5147 /branches/lapack2681/finley/src/Mesh_readGmsh.cpp:2682-2741 /branches/pasowrap/finley/src/Mesh_readGmsh.cpp:3661-3674 /branches/py3_attempt2/finley/src/Mesh_readGmsh.cpp:3871-3891 /branches/restext/finley/src/Mesh_readGmsh.cpp:2610-2624 /branches/ripleygmg_from_3668/finley/src/Mesh_readGmsh.cpp:3669-3791 /branches/stage3.0/finley/src/Mesh_readGmsh.cpp:2569-2590 /branches/symbolic_from_3470/finley/src/Mesh_readGmsh.cpp:3471-3974 /release/3.0/finley/src/Mesh_readGmsh.cpp:2591-2601 /trunk/finley/src/Mesh_readGmsh.cpp:4257-4344

  ViewVC Help
Powered by ViewVC 1.1.26