18 |
/**************************************************************/ |
/**************************************************************/ |
19 |
|
|
20 |
/* Author: Paul Cochrane, cochrane@esscc.uq.edu.au */ |
/* Author: Paul Cochrane, cochrane@esscc.uq.edu.au */ |
21 |
/* MPI-IO version: Derick Hawcroft, hawcroft@gmail.com */ |
/* MPI-IO version: Derick Hawcroft, d.hawcroft@uq.edu.au */ |
22 |
|
|
23 |
/* Version: $Id$ */ |
/* Version: $Id$ */ |
24 |
|
|
29 |
#include "vtkCellType.h" /* copied from vtk source directory !!! */ |
#include "vtkCellType.h" /* copied from vtk source directory !!! */ |
30 |
|
|
31 |
/* |
/* |
32 |
|
MPI version notes: |
33 |
|
|
34 |
In the MPI version, the rank==0 process writes *all* opening and closing |
****************************************************************************** |
35 |
|
*** **** |
36 |
|
*** WARNING: Won't work for meshes with periodic boundary conditions yet ****
37 |
|
*** **** |
38 |
|
****************************************************************************** |
39 |
|
|
40 |
|
In this version, the rank==0 process writes *all* opening and closing |
41 |
XML tags. |
XML tags. |
42 |
Individual process data is copied to a buffer before being written |
Individual process data is copied to a buffer before being written |
43 |
out. The routines are collective and are called in the natural
out. The routines are collective and are called in the natural
44 |
ordering, i.e. 0 to maxProcs-1.
ordering, i.e. 0 to maxProcs-1.
45 |
|
|
46 |
*/ |
*/ |
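/* A minimal, self-contained sketch of the pattern described above (an
   illustration, not code from this file): rank 0 contributes the XML tag,
   every rank contributes its own buffered data, and one collective
   MPI_File_write_ordered call lays the pieces out in rank order. */
#include <mpi.h>
#include <string.h>

static void write_in_rank_order(MPI_Comm comm, MPI_File fh,
                                const char* tag, const char* localData)
{
    int rank;
    MPI_Status status;
    MPI_Comm_rank(comm, &rank);

    /* only rank 0 writes the opening tag; the others contribute 0 bytes */
    MPI_File_write_ordered(fh, (void*)tag,
                           rank == 0 ? (int)strlen(tag) : 0,
                           MPI_CHAR, &status);

    /* every rank then appends its own data, in the natural ordering
       0 .. maxProcs-1 */
    MPI_File_write_ordered(fh, (void*)localData, (int)strlen(localData),
                           MPI_CHAR, &status);
}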
47 |
|
|
48 |
#ifdef PASO_MPI |
#ifdef PASO_MPI |
49 |
|
|
50 |
|
|
51 |
//#define MPIO_HINTS |
//#define MPIO_HINTS |
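/* If MPIO_HINTS were enabled, the infoHints object handed to MPI_File_open
   below could carry I/O tuning hints. A minimal sketch; the hint keys are
   common ROMIO/Lustre names and their availability depends on the MPI
   implementation and file system: */
#ifdef MPIO_HINTS
    MPI_Info_create(&infoHints);
    MPI_Info_set(infoHints, "romio_cb_write", "enable");   /* collective buffering on writes */
    MPI_Info_set(infoHints, "cb_buffer_size", "4194304");  /* 4 MB collective buffer */
    MPI_Info_set(infoHints, "striping_factor", "4");       /* stripe count, Lustre-style */
#else
    infoHints = MPI_INFO_NULL;
#endif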
52 |
|
|
53 |
; |
|
54 |
|
|
55 |
#define MPIO_DEBUG(str) \ |
#define MPIO_DEBUG(str) \ |
56 |
{ \ |
{ \ |
80 |
|
|
81 |
// Local element info (for debugging) |
// Local element info (for debugging) |
82 |
size_t numLocalCells, |
size_t numLocalCells, |
83 |
numInternalCells, |
numInternalCells, |
84 |
numBoundaryCells; |
numBoundaryCells; |
85 |
|
|
86 |
int rank; |
int rank; |
87 |
|
|
115 |
|
|
116 |
// Local node info |
// Local node info |
117 |
int numInternalNodes, |
int numInternalNodes, |
118 |
numLocalNodes; |
numLocalNodes, |
119 |
|
numBoundaryNodes, |
120 |
|
localDOF; |
121 |
|
|
122 |
|
|
123 |
nDim = mesh_p->Nodes->numDim; |
nDim = mesh_p->Nodes->numDim; |
124 |
|
|
139 |
#endif |
#endif |
140 |
|
|
141 |
// Holds local node/element values to help minimize the number of times we need to loop & test
// Holds local node/element values to help minimize the number of times we need to loop & test
142 |
struct localCache |
struct localIndexCache |
143 |
{ |
{ |
144 |
index_t *values; |
index_t *values; |
145 |
int size; |
int size; |
146 |
}; |
}; |
147 |
typedef struct localCache localCache; |
typedef struct localIndexCache localIndexCache; |
148 |
|
|
149 |
localCache nodeCache, |
localIndexCache nodeCache, |
150 |
elementCache; |
elementCache; |
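/* A minimal sketch (not verbatim from this file) of how these caches are
   used: a first pass over all nodes records the indices this rank owns, and
   every later data pass walks only the cached indices instead of re-testing
   degreeOfFreedom for each node. */
pos = 0;
for (i = 0; i < mesh_p->Nodes->numNodes; i++)
    if (mesh_p->Nodes->degreeOfFreedom[i] < localDOF)
        nodeCache.values[pos++] = i;
nodeCache.size = pos;

/* later passes over point data: */
for (i = 0; i < nodeCache.size; i++)
{
    index_t node = nodeCache.values[i];
    /* format the values for `node` into the write buffer */
}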
151 |
|
|
152 |
// Collective Call |
// Collective Call |
153 |
MPI_File_open(mesh_p->Nodes->MPIInfo->comm, (char*)filename_p, amode,infoHints, &fh); |
MPI_File_open(mesh_p->Nodes->MPIInfo->comm, (char*)filename_p, amode,infoHints, &fh); |
298 |
} |
} |
299 |
|
|
300 |
numInternalNodes = dist->numInternal; |
numInternalNodes = dist->numInternal; |
301 |
int numBoundaryNodes = dist->numBoundary; |
numBoundaryNodes = dist->numBoundary; |
302 |
|
|
303 |
int localDOF = dist->numLocal; |
localDOF = dist->numLocal; |
304 |
|
|
305 |
numPoints = dist->numGlobal; |
numPoints = dist->numGlobal; |
306 |
|
|
326 |
return ; |
return ; |
327 |
} |
} |
328 |
|
|
|
|
|
329 |
numCells = elements->numElements; |
numCells = elements->numElements; |
330 |
numGlobalCells = elements->elementDistribution->vtxdist[gsize]; |
numGlobalCells = elements->elementDistribution->vtxdist[gsize]; |
331 |
numLocalCells = elements->elementDistribution->numLocal; |
numLocalCells = elements->elementDistribution->numLocal; |
462 |
|
|
463 |
MPIO_DEBUG(" Writing Coordinate Points... ") |
MPIO_DEBUG(" Writing Coordinate Points... ") |
464 |
|
|
465 |
numLocalNodes=0; |
numLocalNodes=localDOF; |
466 |
for (i = 0; i < mesh_p->Nodes->numNodes; i++) |
|
|
if( mesh_p->Nodes->degreeOfFreedom[i] < localDOF ) |
|
|
numLocalNodes++; |
|
|
|
|
|
/* index_t* NodeDist = MEMALLOC( gsize+1, index_t ); |
|
|
|
|
|
NodeDist[0] = 0; |
|
|
MPI_Allgather( &numLocalNodes, 1, MPI_INT, NodeDist+1, 1, MPI_INT, mesh_p->MPIInfo->comm ); |
|
|
for( i=0; i<gsize; i++ ) |
|
|
NodeDist[i+1] += NodeDist[i]; |
|
|
*/ |
|
|
|
|
467 |
// values vary from 13 to 14 chars, hence the strlen()
// values vary from 13 to 14 chars, hence the strlen()
468 |
char* largebuf = MEMALLOC( numLocalNodes*14*nDim + numLocalNodes*2 + 1 ,char); |
char* largebuf = MEMALLOC( numLocalNodes*14*nDim + numLocalNodes*2 + 1 ,char); |
469 |
largebuf[0] = '\0'; |
largebuf[0] = '\0'; |
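/* A rough illustration (the "%e"-style format is an assumption, not lifted
   from this file) of the 13-14 character estimate: a coordinate printed with
   default precision looks like -1.234567e-308, i.e. up to 14 characters once
   a sign and a three-digit exponent are allowed for, so 14 bytes per
   component plus separators is a safe upper bound, while strlen() of the
   assembled text gives the exact count actually written. */
char tmp[32];
int len = sprintf(tmp, "%e ", -1.234567e-308);  /* "-1.234567e-308 ", len == 15 */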
474 |
|
|
475 |
index_t *vtxdist = NULL, *DOFNodes=NULL,*forwardBuffer=NULL,*backwardBuffer=NULL; |
index_t *vtxdist = NULL, *DOFNodes=NULL,*forwardBuffer=NULL,*backwardBuffer=NULL; |
476 |
|
|
477 |
DOFNodes = MEMALLOC(mesh_p->Nodes->numNodes,index_t); |
DOFNodes = MEMALLOC(mesh_p->Nodes->numNodes,index_t); |
478 |
|
nodeCache.values = MEMALLOC( numLocalNodes, index_t); |
479 |
/* we will allocate slightly more than what is needed */
index_t bc_pos = 0; |
|
nodeCache.values = MEMALLOC( numLocalNodes, index_t); |
|
480 |
for (i = 0; i < mesh_p->Nodes->numNodes; i++) |
for (i = 0; i < mesh_p->Nodes->numNodes; i++) |
481 |
|
|
482 |
{ |
{ |
483 |
// this is the bit that will break for periodic BCs because it assumes that there is a one to one |
// This is the bit that will break for periodic BCs because it assumes that there is a one-to-one
484 |
// correspondance between nodes and DOF |
// correspondence between nodes and degrees of freedom
485 |
DOFNodes[mesh_p->Nodes->degreeOfFreedom[i]] = i; |
DOFNodes[mesh_p->Nodes->degreeOfFreedom[i]] = i; |
486 |
|
|
487 |
/* local node ?*/ |
/* local node ?*/ |
488 |
if( mesh_p->Nodes->degreeOfFreedom[i] < localDOF ) |
if( mesh_p->Nodes->degreeOfFreedom[i] < localDOF ) |
489 |
{ |
{ |
508 |
|
|
509 |
MPI_File_write_ordered(fh, largebuf,tsz, MPI_CHAR, &status); |
MPI_File_write_ordered(fh, largebuf,tsz, MPI_CHAR, &status); |
510 |
MEMFREE(largebuf); |
MEMFREE(largebuf); |
511 |
|
|
512 |
nodesGlobal = MEMALLOC(mesh_p->Nodes->numNodes,index_t); |
nodesGlobal = MEMALLOC(mesh_p->Nodes->numNodes ,index_t); |
513 |
|
|
514 |
// form distribution info on which rank outputs which nodes
// form distribution info on which rank outputs which nodes
515 |
vtxdist = MEMALLOC( gsize+1, index_t ); |
vtxdist = MEMALLOC( gsize+1, index_t ); |
519 |
vtxdist[i+1]+=vtxdist[i]; |
vtxdist[i+1]+=vtxdist[i]; |
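/* The allocation and prefix-sum lines above turn per-rank counts into global
   offsets. A minimal sketch of the whole pattern (mirroring the commented-out
   NodeDist code earlier, which also assumes index_t is int): gather every
   rank's local count, then prefix-sum so that vtxdist[r] is the first global
   index written by rank r and vtxdist[gsize] is the global total; e.g. three
   ranks owning 4, 5 and 3 nodes give vtxdist = {0, 4, 9, 12}. */
vtxdist[0] = 0;
MPI_Allgather(&numLocalNodes, 1, MPI_INT, vtxdist + 1, 1, MPI_INT,
              mesh_p->MPIInfo->comm);
for (i = 0; i < gsize; i++)
    vtxdist[i + 1] += vtxdist[i];   /* counts -> starting offsets */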
520 |
|
|
521 |
// will not work for periodic boundary conditions |
// will not work for periodic boundary conditions |
|
|
|
522 |
// calculate the local nodes file positions |
// calculate the local nodes file positions |
523 |
pos = 0; |
pos = 0; |
524 |
for( i=0; i<mesh_p->Nodes->numNodes; i++ ) |
for( i=0; i<mesh_p->Nodes->numNodes; i++ ) |
533 |
|
|
534 |
// communicate the local nodes' file positions to the interested parties
// communicate the local nodes' file positions to the interested parties
535 |
// send local info |
// send local info |
|
|
|
536 |
forwardBuffer = MEMALLOC( mesh_p->Nodes->numNodes, index_t ); |
forwardBuffer = MEMALLOC( mesh_p->Nodes->numNodes, index_t ); |
|
|
|
537 |
for( n=0; n < dist->numNeighbours; n++ ) |
for( n=0; n < dist->numNeighbours; n++ ) |
538 |
{ |
{ |
539 |
if( dist->edges[n]->numForward) |
if( dist->edges[n]->numForward) |
562 |
MEMFREE(backwardBuffer); |
MEMFREE(backwardBuffer); |
563 |
MEMFREE(forwardBuffer); |
MEMFREE(forwardBuffer); |
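/* The forward/backward buffers freed here carried the exchange announced by
   the "communicate the local nodes' file positions" comment above. A generic
   sketch of such an exchange (dist->neighbours and numBackward are assumed
   field names, and each neighbour would really use its own slice of the
   buffers): */
for (n = 0; n < dist->numNeighbours; n++)
{
    MPI_Request reqs[2];
    MPI_Isend(forwardBuffer,  dist->edges[n]->numForward,  MPI_INT,
              dist->neighbours[n], 0, mesh_p->MPIInfo->comm, &reqs[0]);
    MPI_Irecv(backwardBuffer, dist->edges[n]->numBackward, MPI_INT,
              dist->neighbours[n], 0, mesh_p->MPIInfo->comm, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
}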
564 |
|
|
|
|
|
|
/* |
|
|
char fn[20]; |
|
|
sprintf(fn,"info_%d.txt",myRank); |
|
|
|
|
|
FILE* fid = fopen(fn,"w"); |
|
|
fprintf(fid,"proc %d\n",myRank); |
|
|
fprintf(fid,"Nodes => numLoc = %d, numInternal = %d, numBoundary = %d \nCells => numLoc = %d, numInt=%d, numBd=%d\n",numLocalNodes,numInternalNodes, |
|
|
numBoundaryNodes,numLocalCells,numInternalCells,numBoundaryCells); |
|
|
*/ |
|
565 |
if( myRank == 0) |
if( myRank == 0) |
566 |
{ |
{ |
567 |
char* tags = "</DataArray>\n</Points>\n<Cells>\n<DataArray Name=\"connectivity\" type=\"Int32\" " \ |
char* tags = "</DataArray>\n</Points>\n<Cells>\n<DataArray Name=\"connectivity\" type=\"Int32\" " \ |
577 |
|
|
578 |
// Collective |
// Collective |
579 |
MPIO_DEBUG(" Writing Connectivity... ") |
MPIO_DEBUG(" Writing Connectivity... ") |
580 |
int cnt = 0; |
|
581 |
size_t cellBufsz = numCells*6*numVTKNodesPerElement + numCells; |
// TODO: Improve on upper bound |
582 |
char *cellBuf = MEMALLOC(cellBufsz,char); |
size_t sz = numLocalCells*6*numVTKNodesPerElement + numLocalCells; |
583 |
|
char *cellBuf = MEMALLOC(sz,char); |
584 |
cellBuf[0] = '\0'; |
cellBuf[0] = '\0'; |
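/* Assumed worst case behind the sz estimate above: each connectivity index is
   printed as at most five digits plus a separator (6 chars), and every cell
   adds one newline, hence numLocalCells*6*numVTKNodesPerElement
   + numLocalCells bytes; e.g. 1000 local hexahedra (8 nodes each) reserve
   1000*6*8 + 1000 = 49000 bytes. */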
585 |
tsz=0; |
tsz=0; |
|
|
|
586 |
pos = 0; |
pos = 0; |
587 |
//numLocalCells |
// numCells? |
588 |
elementCache.values = MEMALLOC(numCells,index_t); |
elementCache.values = MEMALLOC(numLocalCells,index_t); |
589 |
if (nodetype == FINLEY_REDUCED_DEGREES_OF_FREEDOM) |
if (nodetype == FINLEY_REDUCED_DEGREES_OF_FREEDOM) |
590 |
{ |
{ |
591 |
for (i = 0; i < numCells; i++) |
for (i = 0; i < numCells; i++) |
607 |
} |
} |
608 |
else if (VTK_QUADRATIC_HEXAHEDRON==cellType) |
else if (VTK_QUADRATIC_HEXAHEDRON==cellType) |
609 |
{ |
{ |
610 |
char tmpbuf2[20*20+8]; |
char tmpbuf2[20*20]; |
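/* presumably sized for the 20 nodes of a VTK quadratic hexahedron, each
   printed as at most ~20 characters */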
611 |
for (i = 0; i < numCells; i++) |
for (i = 0; i < numCells; i++) |
612 |
{ |
{ |
613 |
|
|
670 |
strcat(cellBuf,"\n"); |
strcat(cellBuf,"\n"); |
671 |
tsz+=1; |
tsz+=1; |
672 |
elementCache.values[pos++]=i; |
elementCache.values[pos++]=i; |
|
|
|
673 |
} |
} |
674 |
} |
} |
675 |
|
|
676 |
elementCache.size = pos; |
elementCache.size = pos; |
677 |
|
|
678 |
MPI_File_write_ordered(fh, cellBuf,tsz, MPI_CHAR, &status); |
MPI_File_write_ordered(fh, cellBuf,tsz, MPI_CHAR, &status); |
679 |
MEMFREE(cellBuf); |
MEMFREE(cellBuf); |
680 |
MPIO_DEBUG(" Done Writing Connectivity ") |
MPIO_DEBUG(" Done Writing Connectivity ") |
681 |
MPIO_DEBUG(" Writing Offsets... ") |
MPIO_DEBUG(" Writing Offsets & Types... ") |
682 |
|
|
683 |
// Non-Collective |
// Non-Collective |
684 |
if( myRank == 0) |
if( myRank == 0) |
694 |
int sz=0; |
int sz=0; |
695 |
int lg = log10(numGlobalCells * n) + 1; |
int lg = log10(numGlobalCells * n) + 1; |
696 |
sz += numGlobalCells*lg; |
sz += numGlobalCells*lg; |
697 |
sz += numGlobalCells; // #newlines |
sz += numGlobalCells; |
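/* The lg estimate above counts decimal digits: the offsets written here grow
   up to numGlobalCells*n (n = nodes per cell), and floor(log10(x)) + 1 is the
   digit count of x, e.g. numGlobalCells*n == 8192 gives lg == 4; the extra
   numGlobalCells bytes cover one newline per entry. */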
698 |
|
|
699 |
char* largebuf = MEMALLOC(sz + strlen(tag1) + strlen(tag2) + strlen(tag3) + strlen(tag4),char); |
char* largebuf = MEMALLOC(sz + strlen(tag1) + strlen(tag2) + strlen(tag3) + strlen(tag4),char); |
700 |
largebuf[0] ='\0'; |
largebuf[0] ='\0'; |
727 |
MEMFREE(largebuf); |
MEMFREE(largebuf); |
728 |
} |
} |
729 |
|
|
730 |
MPIO_DEBUG(" Done Writing Offsets ") |
MPIO_DEBUG(" Done Writing Offsets & Types ") |
731 |
|
|
732 |
// Write Point Data Header Tags |
// Write Point Data Header Tags |
733 |
if( myRank == 0) |
if( myRank == 0) |
1143 |
strcat(largebuf,"\n"); |
strcat(largebuf,"\n"); |
1144 |
tsz+=1; |
tsz+=1; |
1145 |
} |
} |
|
// Write out local data |
|
1146 |
MPI_File_write_ordered(fh,largebuf,tsz,MPI_CHAR,&status); |
MPI_File_write_ordered(fh,largebuf,tsz,MPI_CHAR,&status); |
1147 |
MEMFREE(largebuf); |
MEMFREE(largebuf); |
1148 |
if( myRank == 0) |
if( myRank == 0) |
1154 |
|
|
1155 |
} |
} |
1156 |
} |
} |
1157 |
|
// closing celldata tag |
|
// Closing CellData tag
|
1158 |
if(myRank == 0) |
if(myRank == 0) |
1159 |
{ |
{ |
1160 |
char* tag = "</CellData>\n"; |
char* tag = "</CellData>\n"; |
1180 |
#endif |
#endif |
1181 |
|
|
1182 |
MPI_File_close(&fh); |
MPI_File_close(&fh); |
|
|
|
|
// fclose(fid); |
|
1183 |
MPIO_DEBUG(" ***** Exit saveVTK ***** ") |
MPIO_DEBUG(" ***** Exit saveVTK ***** ") |
|
|
|
|
// MEMFREE( NodeDist ); |
|
1184 |
} |
} |
1185 |
|
|
1186 |
#undef MPIO_DEBUG |
#undef MPIO_DEBUG |