out. The routines are collectively called and will be called in the natural
ordering, i.e. 0 to maxProcs-1.

Notable Notables:
the struct localIndexCache: stores local domain indices for faster reference
*/
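#if 0
/*
 * Illustration only (excluded from compilation), not part of the original
 * file: a minimal, self-contained sketch of the collective-write pattern
 * described above.  Every rank makes the same MPI-IO calls, and
 * MPI_File_write_ordered lays the per-rank chunks out in the natural
 * ordering, i.e. rank 0 to maxProcs-1.  The file name "ordered_demo.txt"
 * is made up for the example.
 */
#include <mpi.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    int rank;
    char line[64];
    MPI_File fh;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* collective: every process in the communicator must take part */
    MPI_File_open(MPI_COMM_WORLD, "ordered_demo.txt",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    sprintf(line, "chunk written by rank %d\n", rank);

    /* chunks appear in the file in rank order 0..maxProcs-1 */
    MPI_File_write_ordered(fh, line, (int)strlen(line), MPI_CHAR, &status);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}
#endif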

#ifdef PASO_MPI
/* ... */

void Finley_Mesh_saveVTK_MPIO(const char * filename_p, Finley_Mesh *mesh_p, const dim_t num_data,char* *names_p, escriptDataC* *data_pp)
{
int numPoints,
numCells = -1,
myRank,comm,gsize,
/* ... */
nDim,
shape;
size_t __n;
int i,j,k,m,n,count;
int numGlobalCells = 0;
index_t *nodesGlobal=NULL; // used to get the connectivity right for VTK
/* ... */

int numInternalNodes,
numLocalNodes,
numBoundaryNodes,
localDOF; // equals (DOF of internal nodes) + (DOF of boundary nodes) of the local domain

nDim = mesh_p->Nodes->numDim;
/* ... */

infoHints = MPI_INFO_NULL;
#endif

// Holds a local node/element index into the global array
struct localIndexCache
{
index_t *values;
int size;
};

struct localIndexCache nodeCache,
elementCache;

// Collective Call
MPI_File_open(mesh_p->Nodes->MPIInfo->comm, (char*)filename_p, amode, infoHints, &fh);
/* ... */

numLocalNodes=localDOF;

// we will be writing values which vary from 13-15 chars hence the strlen()
char* largebuf = MEMALLOC( numLocalNodes*15*nDim + numLocalNodes*2 + 1 ,char);
largebuf[0] = '\0';
char tmpbuf[15];
int tsz=0;
int numNodesOutput=0;
index_t pos=0;
/* ... */
nodeCache.values = MEMALLOC( numLocalNodes, index_t);
index_t bc_pos = 0;

// Custom string concat: avoids the expensive strlen(3) call made by strcat(3)
// Note the implicit assumption on the variable "tsz"
int __len,__j;
char *zero = "0.000000e+00 ";
char *newline = "\n";

#define __STRCAT(dest,chunk,tsz) \
{ \
__len = strlen(chunk); \
__j = -1; \
while(__j++ < __len) \
*(dest+tsz+__j)=*(chunk+__j); \
tsz+=__len; \
}
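#if 0
/*
 * Illustration only (excluded from compilation), not part of the original
 * source: the same "append at a tracked offset" idea as __STRCAT above,
 * written as a plain function.  Repeated strcat(3) calls re-scan the whole
 * destination every time (O(n^2) over a long run of appends); carrying the
 * running length tsz makes each append proportional to the chunk length.
 */
#include <stdio.h>
#include <string.h>

static void append(char *dest, size_t *tsz, const char *chunk)
{
    size_t len = strlen(chunk);
    memcpy(dest + *tsz, chunk, len);   /* copy at the current end of dest */
    *tsz += len;                       /* advance the running length      */
    dest[*tsz] = '\0';                 /* keep dest a valid C string      */
}

int main(void)
{
    char buf[64];
    size_t tsz = 0;

    append(buf, &tsz, "1.000000e+00 ");
    append(buf, &tsz, "0.000000e+00 ");
    append(buf, &tsz, "\n");
    printf("%s", buf);                 /* "1.000000e+00 0.000000e+00 \n"  */
    return 0;
}
#endif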

// Loop over all nodes
for (i = 0; i < mesh_p->Nodes->numNodes; i++)
{
// This is the bit that will break for periodic BCs because it assumes that there is a one to one
// correspondence between nodes and degrees of freedom
// TODO: handle periodic BC's
DOFNodes[mesh_p->Nodes->degreeOfFreedom[i]] = i;

// Is this node local to the domain?
if( mesh_p->Nodes->degreeOfFreedom[i] < localDOF )
{
for (j = 0; j < nDim; j++)
{
sprintf(tmpbuf,"%e ", mesh_p->Nodes->Coordinates[INDEX2(j, i, nDim)] );
__STRCAT(largebuf,tmpbuf,tsz)
}
for (k=0; k<3-nDim; k++)
{
__STRCAT(largebuf,zero,tsz)
}
__STRCAT(largebuf,newline,tsz)
nodeCache.values[numNodesOutput++]=i;
}
}
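#if 0
/*
 * Illustration only (excluded from compilation); the data below is invented.
 * It sketches why nodeCache is kept: the pass above decides which nodes are
 * local and records their indices, so the later point-data writer can walk
 * exactly the same nodes in exactly the same order without re-testing
 * degreeOfFreedom[i] < localDOF.
 */
#include <stdio.h>
#include <stdlib.h>

struct localIndexCache { int *values; int size; };

int main(void)
{
    int dof[6]   = { 0, 4, 1, 5, 2, 3 };   /* made-up degrees of freedom */
    int localDOF = 4;                      /* made-up local DOF count    */
    struct localIndexCache cache;
    int i;

    cache.values = malloc(6 * sizeof(int));
    cache.size = 0;

    /* pass 1: select local nodes and remember them */
    for (i = 0; i < 6; i++)
        if (dof[i] < localDOF)
            cache.values[cache.size++] = i;

    /* pass 2: revisit the same nodes in the same order */
    for (i = 0; i < cache.size; i++)
        printf("node %d\n", cache.values[i]);

    free(cache.values);
    return 0;
}
#endif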
/* ... */

{
Paso_CommBuffer_recv(mesh_p->Nodes->CommBuffer, dist->neighbours[n], sizeof(index_t));
Paso_CommBuffer_unpack(mesh_p->Nodes->CommBuffer, dist->neighbours[n], NULL, backwardBuffer, sizeof(index_t), 0 );
/* TODO: voodoo to handle periodic BC's */
for( i=0; i<dist->edges[n]->numBackward; i++ )
nodesGlobal[DOFNodes[dist->edges[n]->indexBackward[i] ]] = backwardBuffer[i];
}
}


MEMFREE(vtxdist);
MEMFREE(DOFNodes);
MEMFREE(backwardBuffer);
/* ... */

// Collective
MPIO_DEBUG(" Writing Connectivity... ")

// TODO: Improve on upper bound, will fail for very very large meshes!!
size_t sz = numLocalCells*6*numVTKNodesPerElement + numLocalCells;
largebuf = MEMALLOC(sz,char);
largebuf[0] = '\0';
/* ... */
for (j = 0; j < numVTKNodesPerElement; j++)
{
sprintf(tmpbuf,"%d ",nodesGlobal[mesh_p->Nodes->toReduced[elements->Nodes[INDEX2(elements->ReferenceElement->Type->linearNodes[j], i, NN)]]]);
__STRCAT(largebuf,tmpbuf,tsz)
}
__STRCAT(largebuf,newline,tsz)
elementCache.values[pos++]=i;
}
}
}
else if (VTK_QUADRATIC_HEXAHEDRON==cellType)
{
char tmpbuf2[20*20*2];

for (i = 0; i < numCells; i++)
{
/* ... */
nodesGlobal[elements->Nodes[INDEX2(13, i, NN)]],
nodesGlobal[elements->Nodes[INDEX2(14, i, NN)]],
nodesGlobal[elements->Nodes[INDEX2(15, i, NN)]]);
__STRCAT(largebuf,tmpbuf2,tsz)
elementCache.values[pos++]=i;
}
}
/* ... */

for (i = 0; i < numCells; i++)
{
if (elements->Id[i] >= elements->elementDistribution->vtxdist[myRank] && elements->Id[i] <= elements->elementDistribution->vtxdist[myRank+1] - 1 )
{
for (j = 0; j < numVTKNodesPerElement; j++)
{
sprintf(tmpbuf,"%d ", nodesGlobal[elements->Nodes[INDEX2(elements->ReferenceElement->Type->geoNodes[j], i, NN)]]);
__STRCAT(largebuf,tmpbuf,tsz)
}
__STRCAT(largebuf,newline,tsz)
elementCache.values[pos++]=i;
}
}
}
else
{
for(i = 0;i < numCells ; i++)
{
// is this element in the domain of process "myRank"?
if( elements->Id[i] >= elements->elementDistribution->vtxdist[myRank] && elements->Id[i] <= elements->elementDistribution->vtxdist[myRank+1]-1)
{
for (j = 0; j < numVTKNodesPerElement; j++)
{
sprintf(tmpbuf,"%d ", nodesGlobal[ elements->Nodes[INDEX2(j, i, NN) ] ] );
__STRCAT(largebuf,tmpbuf,tsz)
}
__STRCAT(largebuf,newline,tsz)
elementCache.values[pos++]=i;
}
}
}

elementCache.size = pos;

largebuf[tsz] = '\0';
MPI_File_write_ordered(fh,largebuf,tsz, MPI_CHAR, &status);
MEMFREE(largebuf);
MPIO_DEBUG(" Done Writing Connectivity ")
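#if 0
/*
 * Illustration only (excluded from compilation) of the tag/data pattern this
 * writer uses from here on: rank 0 queues an XML tag on the shared file
 * pointer with MPI_File_iwrite_shared and waits for it, then every rank
 * contributes its own chunk through the collective MPI_File_write_ordered,
 * which keeps the chunks in rank order.  File and tag names are made up.
 */
#include <mpi.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    int rank;
    char chunk[64];
    MPI_File fh;
    MPI_Request req;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_File_open(MPI_COMM_WORLD, "tagged_demo.xml",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    if (rank == 0)
    {
        char *tag = "<DataArray>\n";
        MPI_File_iwrite_shared(fh, tag, (int)strlen(tag), MPI_CHAR, &req);
        MPI_Wait(&req, &status);
    }

    sprintf(chunk, "data from rank %d\n", rank);
    MPI_File_write_ordered(fh, chunk, (int)strlen(chunk), MPI_CHAR, &status);

    if (rank == 0)
    {
        char *tag = "</DataArray>\n";
        MPI_File_iwrite_shared(fh, tag, (int)strlen(tag), MPI_CHAR, &req);
        MPI_Wait(&req, &status);
    }

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}
#endif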
/* ... */

int n = numVTKNodesPerElement;

// allocate an upper bound on number of bytes needed
int sz=0;
int lg = log10(numGlobalCells * n) + 1;
sz += numGlobalCells*lg;
sz += numGlobalCells;
tsz = 0;

char* largebuf = MEMALLOC(sz + strlen(tag1) + strlen(tag2) + strlen(tag3) + strlen(tag4),char);
largebuf[0] ='\0';
char tmp[10];
__STRCAT(largebuf,tag1,tsz)
for (i=numVTKNodesPerElement; i<=numGlobalCells*numVTKNodesPerElement; i+=numVTKNodesPerElement)
{
sprintf(tmp,"%d\n", i);
__STRCAT(largebuf,tmp,tsz)
}
__STRCAT(largebuf,tag2,tsz)
largebuf[tsz] = '\0';
MPI_File_iwrite_shared(fh,largebuf, tsz,MPI_CHAR,&req);
MPI_Wait(&req,&status);

// re-using buffer!
largebuf[0] = '\0';
tsz = 0;
__STRCAT(largebuf,tag3,tsz)
for (i=0; i<numGlobalCells; i++)
{
sprintf(tmp, "%d\n", cellType);
__STRCAT(largebuf,tmp,tsz)
}
__STRCAT(largebuf,tag4,tsz)
largebuf[tsz] = '\0';
MPI_File_iwrite_shared(fh,largebuf,tsz,MPI_CHAR,&req);
MPI_Wait(&req,&status);
MEMFREE(largebuf);
/* ... */

MPIO_DEBUG(" Done Writing Offsets & Types ")
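#if 0
/*
 * Illustration only (excluded from compilation) of what the two loops above
 * emit: "offsets" is the running total of connectivity entries after each
 * cell, and "types" repeats the VTK cell id once per cell.  The numbers here
 * (3 cells, 8 nodes per cell, cell id 12) are made up for the example.
 */
#include <stdio.h>

int main(void)
{
    int numGlobalCells = 3, numVTKNodesPerElement = 8, cellType = 12;
    int i;

    /* offsets: 8 16 24 */
    for (i = numVTKNodesPerElement;
         i <= numGlobalCells * numVTKNodesPerElement;
         i += numVTKNodesPerElement)
        printf("%d\n", i);

    /* types: 12 12 12 */
    for (i = 0; i < numGlobalCells; i++)
        printf("%d\n", cellType);

    return 0;
}
#endif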

// Write Cell Data header tags
if(myRank == 0)
{
MPIO_DEBUG(" Writing Cell Data ...")
if( write_celldata)
{
char tmpBuf[80];
char header[600];
// mark the active data arrays
bool_t set_scalar=FALSE,set_vector=FALSE, set_tensor=FALSE;
sprintf(header, "<CellData");
for (i_data =0 ;i_data<num_data;++i_data)
{
if (! isEmpty(data_pp[i_data]) && isCellCentered[i_data])
{
// if the rank == 0: --> scalar data
// if the rank == 1: --> vector data
/* ... */
if (! set_vector)
{
sprintf(tmpBuf," Vectors=\"%s\"",names_p[i_data]);
strcat(header,tmpBuf);
set_vector=TRUE;
}
break;
/* ... */
if (! set_tensor)
{
sprintf(tmpBuf," Tensors=\"%s\"",names_p[i_data]);
strcat(header,tmpBuf);
set_tensor=TRUE;
}
break;
/* ... */
}
}
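#if 0
/*
 * Illustration only (excluded from compilation) of the header the marking
 * loop above builds: the first scalar/vector/tensor data set of each kind is
 * named in the <CellData> (or <PointData>) tag, which is how VTK readers
 * choose their default arrays.  The data-set names are invented.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char header[600], tmpBuf[80];

    sprintf(header, "<CellData");
    sprintf(tmpBuf, " Scalars=\"%s\"", "temperature");
    strcat(header, tmpBuf);
    sprintf(tmpBuf, " Vectors=\"%s\"", "velocity");
    strcat(header, tmpBuf);
    strcat(header, ">\n");

    /* -> <CellData Scalars="temperature" Vectors="velocity"> */
    printf("%s", header);
    return 0;
}
#endif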

// write actual data (collective)
if(write_celldata)
{
for (i_data =0 ;i_data<num_data;++i_data)
{
if (! isEmpty(data_pp[i_data]) && isCellCentered[i_data])
{
numPointsPerSample = elements->ReferenceElement->numQuadNodes;
rank = getDataPointRank(data_pp[i_data]);
/* ... */
if( myRank == 0)
{
char header[250];
sprintf(header,"<DataArray Name=\"%s\" type=\"Float32\" NumberOfComponents=\"%d\" format=\"ascii\">\n",names_p[i_data], nCompReqd);
MPI_File_iwrite_shared(fh,header,strlen(header),MPI_CHAR,&req);
MPI_Wait(&req,&status);
}

// Write the actual data
char tmpbuf[15];
char* largebuf = MEMALLOC(nCompReqd*numLocalCells*15 + numLocalCells*nCompReqd + nCompReqd + 15,char);
largebuf[0] = '\0';
size_t tsz = 0;

double sampleAvg[nComp];

for (k=0; k<elementCache.size; k++)
{
i = elementCache.values[k];

values = getSampleData(data_pp[i_data], i);
// averaging over the number of points in the sample
for (n=0; n<nComp; n++)
{
rtmp = 0.;
for (j=0; j<numPointsPerSample; j++) rtmp += values[INDEX2(n,j,nComp)];
sampleAvg[n] = rtmp/numPointsPerSample;
}
// if the number of required components is more than the number
// of actual components, pad with zeros

// probably only need to get shape of first element
// write the data different ways for scalar, vector and tensor
if (nCompReqd == 1)
{
sprintf(tmpbuf, " %e", sampleAvg[0]);
__STRCAT(largebuf,tmpbuf,tsz)
}
else if (nCompReqd == 3)
{
// write out the data
for (m=0; m<shape; m++)
{
sprintf(tmpbuf, " %e", sampleAvg[m]);
__STRCAT(largebuf,tmpbuf,tsz)
}
for (m=0; m<nCompReqd-shape; m++)
{
__STRCAT(largebuf,zero,tsz)
}
}
else if (nCompReqd == 9)
{
// tensor data, so have a 3x3 matrix to output as a row
// of 9 data points
count = 0;
for (m=0; m<shape; m++)
{
for (n=0; n<shape; n++)
{
sprintf(tmpbuf, " %e", sampleAvg[count]);
__STRCAT(largebuf,tmpbuf,tsz)
count++;
}
for (n=0; n<3-shape; n++)
{
__STRCAT(largebuf,zero,tsz)
}
}
for (m=0; m<3-shape; m++)
for (n=0; n<3; n++)
{
__STRCAT(largebuf,zero,tsz)
}
}
__STRCAT(largebuf,newline,tsz)
}
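#if 0
/*
 * Illustration only (excluded from compilation) of the two per-cell steps
 * above: average the quadrature-point values of a sample down to one value
 * per component, then pad with zeros because VTK expects 3-component vectors
 * (and 9-component tensors).  The sample values are invented; nComp = shape
 * = 2 plays the role of a 2D vector, so one zero is appended.
 */
#include <stdio.h>

int main(void)
{
    /* component n of quadrature point j is stored at n + nComp*j,
       the same layout INDEX2(n, j, nComp) gives above */
    double values[2 * 3] = { 1.0, -2.0,   2.0, -4.0,   3.0, -6.0 };
    int nComp = 2, numPointsPerSample = 3, shape = 2, nCompReqd = 3;
    double sampleAvg[2], rtmp;
    int n, j, m;

    for (n = 0; n < nComp; n++)
    {
        rtmp = 0.;
        for (j = 0; j < numPointsPerSample; j++)
            rtmp += values[n + nComp * j];
        sampleAvg[n] = rtmp / numPointsPerSample;   /* -> 2.0 and -4.0 */
    }

    for (m = 0; m < shape; m++)
        printf(" %e", sampleAvg[m]);                /* averaged components */
    for (m = 0; m < nCompReqd - shape; m++)
        printf(" %e", 0.0);                         /* zero padding to 3   */
    printf("\n");
    return 0;
}
#endif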
largebuf[tsz] = '\0';
MPI_File_write_ordered(fh,largebuf,tsz,MPI_CHAR,&status);
MEMFREE(largebuf);
if( myRank == 0)
/* ... */
MPI_File_iwrite_shared(fh,tag,strlen(tag),MPI_CHAR,&req);
MPI_Wait(&req,&status);
}

}
}
// closing celldata tag
if(myRank == 0)
{
char* tag = "</CellData>\n";
MPI_File_iwrite_shared(fh,tag,strlen(tag),MPI_CHAR,&req);
MPI_Wait(&req,&status);
}

MPIO_DEBUG(" Done Writing Cell Data ")
}


// Write Point Data header tags
if( myRank == 0)
{
char header[600];
char tmpBuf[50];

if (write_pointdata)
{
MPIO_DEBUG(" Writing Pointdata... ")
// mark the active data arrays
bool_t set_scalar=FALSE,set_vector=FALSE, set_tensor=FALSE;
sprintf(header, "<PointData");
for (i_data =0 ;i_data<num_data;++i_data)
{
if (! isEmpty(data_pp[i_data]) && !isCellCentered[i_data])
{
// if the rank == 0: --> scalar data
// if the rank == 1: --> vector data
/* ... */
if (! set_vector)
{
sprintf(tmpBuf," Vectors=\"%s\"",names_p[i_data]);
strcat(header,tmpBuf);
set_vector=TRUE;
}
break;
/* ... */
if (! set_tensor)
{
sprintf(tmpBuf," Tensors=\"%s\"",names_p[i_data]);
strcat(header,tmpBuf);
set_tensor=TRUE;
}
break;
/* ... */
}
}

// write actual data
if(write_pointdata)
{
for (i_data =0 ;i_data<num_data;++i_data)
{
if (! isEmpty(data_pp[i_data]) && !isCellCentered[i_data])
{
numPointsPerSample = elements->ReferenceElement->numQuadNodes;
rank = getDataPointRank(data_pp[i_data]);
if( myRank == 0) |
if( myRank == 0) |
1043 |
{ |
{ |
1044 |
char header[250]; |
char header[250]; |
1045 |
sprintf(header,"<DataArray Name=\"%s\" type=\"Float32\" NumberOfComponents=\"%d\" format=\"ascii\">\n",names_p[i_data], nCompReqd); |
sprintf(header, "<DataArray Name=\"%s\" type=\"Float32\" NumberOfComponents=\"%d\" format=\"ascii\">\n",names_p[i_data], nCompReqd); |
1046 |
MPI_File_iwrite_shared(fh,header,strlen(header),MPI_CHAR,&req); |
MPI_File_iwrite_shared(fh,header,strlen(header),MPI_CHAR,&req); |
1047 |
MPI_Wait(&req,&status); |
MPI_Wait(&req,&status); |
1048 |
} |
} |
1049 |
|
// write out the data |
1050 |
|
// if the number of required components is more than the number |
1051 |
|
// of actual components, pad with zeros |
1052 |
|
|
1053 |
// Write the actual data */ |
char tmpbuf[15]; |
1054 |
char tmpbuf[14]; |
char* largebuf = MEMALLOC(nCompReqd*numLocalNodes*15 + numLocal*nCompReqd + nCompReqd + 15,char); |
|
char* largebuf = MEMALLOC(nCompReqd*numLocalCells*14 + numLocalCells*nCompReqd + nCompReqd + 14,char); |
|
1055 |
largebuf[0] = '\0'; |
largebuf[0] = '\0'; |
1056 |
|
bool_t do_write=TRUE; |
1057 |
size_t tsz = 0; |
size_t tsz = 0; |
1058 |
|
|
1059 |
double sampleAvg[nComp]; |
for(k=0;k < nodeCache.size;k++) |
|
|
|
|
for (k=0; i<elementCache.size; k++) |
|
1060 |
{ |
{ |
1061 |
i = elementCache.values[k]; |
i = nodeCache.values[k]; |
|
|
|
|
values = getSampleData(data_pp[i_data], i); |
|
|
// averaging over the number of points in the sample |
|
|
for (k=0; k<nComp; k++) |
|
|
{ |
|
|
rtmp = 0.; |
|
|
for (j=0; j<numPointsPerSample; j++) rtmp += values[INDEX2(k,j,nComp)]; |
|
|
sampleAvg[k] = rtmp/numPointsPerSample; |
|
|
} |
|
|
// if the number of required components is more than the number |
|
|
// of actual components, pad with zeros |
|
1062 |
|
|
1063 |
// probably only need to get shape of first element |
if (nodetype==FINLEY_REDUCED_DEGREES_OF_FREEDOM) |
|
// write the data different ways for scalar, vector and tensor |
|
|
if (nCompReqd == 1) |
|
|
{ |
|
|
sprintf(tmpbuf, " %e", sampleAvg[0]); |
|
|
__len=strlen(tmpbuf); |
|
|
__STRCAT(largebuf,tmpbuf) |
|
|
tsz+=__len; |
|
|
} |
|
|
else if (nCompReqd == 3) |
|
1064 |
{ |
{ |
1065 |
// write out the data |
if (mesh_p->Nodes->toReduced[i]>=0) |
|
for (m=0; m<shape; m++) |
|
1066 |
{ |
{ |
1067 |
sprintf(tmpbuf, " %e", sampleAvg[m]); |
switch(getFunctionSpaceType(data_pp[i_data])) |
1068 |
__len=strlen(tmpbuf); |
{ |
1069 |
__STRCAT(largebuf,tmpbuf) |
case FINLEY_DEGREES_OF_FREEDOM: |
1070 |
tsz+=__len; |
values = getSampleData(data_pp[i_data],mesh_p->Nodes->degreeOfFreedom[i]); |
1071 |
|
break; |
1072 |
|
case FINLEY_REDUCED_DEGREES_OF_FREEDOM: |
1073 |
|
values = getSampleData(data_pp[i_data],mesh_p->Nodes->reducedDegreeOfFreedom[i]); |
1074 |
|
break; |
1075 |
|
case FINLEY_NODES: |
1076 |
|
values = getSampleData(data_pp[i_data],i); |
1077 |
|
break; |
1078 |
|
} |
1079 |
|
do_write=TRUE; |
1080 |
} |
} |
1081 |
for (m=0; m<nCompReqd-shape; m++) |
else |
1082 |
{ |
{ |
1083 |
tsz+=13; |
do_write=FALSE; |
|
strcat(largebuf," 0.000000e+00"); |
|
|
|
|
1084 |
} |
} |
1085 |
} |
} |
1086 |
else if (nCompReqd == 9) |
else |
1087 |
{ |
{ |
1088 |
// tensor data, so have a 3x3 matrix to output as a row |
do_write=TRUE; |
1089 |
// of 9 data points |
switch(getFunctionSpaceType(data_pp[i_data])) |
|
count = 0; |
|
|
for (m=0; m<shape; m++) |
|
1090 |
{ |
{ |
1091 |
for (n=0; n<shape; n++) |
case FINLEY_DEGREES_OF_FREEDOM: |
1092 |
|
values = getSampleData(data_pp[i_data],mesh_p->Nodes->degreeOfFreedom[i]); |
1093 |
|
break; |
1094 |
|
case FINLEY_NODES: |
1095 |
|
values = getSampleData(data_pp[i_data],i); |
1096 |
|
break; |
1097 |
|
} |
1098 |
|
} |
1099 |
|
// write the data different ways for scalar, vector and tensor |
1100 |
|
if (do_write) |
1101 |
|
{ |
1102 |
|
if (nCompReqd == 1) |
1103 |
|
{ |
1104 |
|
sprintf(tmpbuf," %e", values[0]); |
1105 |
|
__STRCAT(largebuf,tmpbuf,tsz) |
1106 |
|
} |
1107 |
|
else if (nCompReqd == 3) |
1108 |
|
{ |
1109 |
|
for (m=0; m<shape; m++) |
1110 |
{ |
{ |
|
sprintf(tmpbuf, " %e", sampleAvg[count]); |
|
|
__len=strlen(tmpbuf); |
|
|
__STRCAT(largebuf,tmpbuf) |
|
|
tsz+=__len;; |
|
|
|
|
1111 |
|
|
1112 |
count++; |
sprintf(tmpbuf," %e",values[m]); |
1113 |
|
__STRCAT(largebuf,tmpbuf,tsz) |
1114 |
} |
} |
1115 |
for (n=0; n<3-shape; n++) |
for (m=0; m<nCompReqd-shape; m++) |
1116 |
{ |
{ |
1117 |
__len=13; |
__STRCAT(largebuf,zero,tsz) |
|
__STRCAT(largebuf,zero) |
|
|
tsz+=13; |
|
1118 |
} |
} |
1119 |
} |
} |
1120 |
for (m=0; m<3-shape; m++) |
else if (nCompReqd == 9) |
1121 |
for (n=0; n<3; n++) |
{ |
1122 |
|
// tensor data, so have a 3x3 matrix to output as a row |
1123 |
|
// of 9 data points |
1124 |
|
count = 0; |
1125 |
|
for (m=0; m<shape; m++) |
1126 |
{ |
{ |
1127 |
__len=13; |
for (n=0; n<shape; n++) |
1128 |
__STRCAT(largebuf,zero) |
{ |
1129 |
tsz+=13; |
sprintf(tmpbuf," %e", values[count]); |
1130 |
|
__STRCAT(largebuf,tmpbuf,tsz) |
1131 |
|
count++; |
1132 |
|
} |
1133 |
|
for (n=0; n<3-shape; n++) |
1134 |
|
{ |
1135 |
|
__STRCAT(largebuf,zero,tsz) |
1136 |
|
} |
1137 |
} |
} |
1138 |
|
for (m=0; m<3-shape; m++) |
1139 |
|
{ |
1140 |
|
for (n=0; n<3; n++) |
1141 |
|
{ |
1142 |
|
__STRCAT(largebuf,zero,tsz) |
1143 |
|
} |
1144 |
|
} |
1145 |
|
} |
1146 |
|
__STRCAT(largebuf,newline,tsz) |
1147 |
} |
} |
1148 |
__len=1; |
|
|
__STRCAT(largebuf,newline) |
|
|
tsz+=1; |
|
1149 |
} |
} |
1150 |
|
// Write out local data |
1151 |
|
|
1152 |
|
largebuf[tsz] = '\0'; |
1153 |
MPI_File_write_ordered(fh,largebuf,tsz,MPI_CHAR,&status); |
MPI_File_write_ordered(fh,largebuf,tsz,MPI_CHAR,&status); |
1154 |
MEMFREE(largebuf); |
MEMFREE(largebuf); |
1155 |
if( myRank == 0) |
if( myRank == 0) |
1158 |
MPI_File_iwrite_shared(fh,tag,strlen(tag),MPI_CHAR,&req); |
MPI_File_iwrite_shared(fh,tag,strlen(tag),MPI_CHAR,&req); |
1159 |
MPI_Wait(&req,&status); |
MPI_Wait(&req,&status); |
1160 |
} |
} |
|
|
|
1161 |
} |
} |
1162 |
} |
} |
1163 |
// closing celldata tag |
// Finish off with closing tag |
1164 |
if(myRank == 0) |
if(myRank == 0) |
1165 |
{ |
{ |
1166 |
char* tag = "</CellData>\n"; |
char* tag = "</PointData>\n"; |
1167 |
MPI_File_iwrite_shared(fh,tag,strlen(tag),MPI_CHAR,&req); |
MPI_File_iwrite_shared(fh,tag,strlen(tag),MPI_CHAR,&req); |
1168 |
MPI_Wait(&req,&status); |
MPI_Wait(&req,&status); |
1169 |
} |
} |
1170 |
|
MPIO_DEBUG(" Done Writing Pointdata ") |
1171 |
} |
} |
1172 |
|
// end write_pointdata |
1173 |
|
|
1174 |
/* tag and bag... */ |
// tag and bag... |
1175 |
if (myRank == 0) |
if (myRank == 0) |
1176 |
{ |
{ |
1177 |
char *footer = "</Piece>\n</UnstructuredGrid>\n</VTKFile>"; |
char *footer = "</Piece>\n</UnstructuredGrid>\n</VTKFile>"; |
1186 |
MPI_Info_free(&infoHints); |
MPI_Info_free(&infoHints); |
1187 |
#undef MPIO_HINTS |
#undef MPIO_HINTS |
1188 |
#endif |
#endif |
|
printf("\nTime: %f \n", Paso_timer() - time0); |
|
1189 |
MPI_File_close(&fh); |
MPI_File_close(&fh); |
1190 |
MPIO_DEBUG(" ***** Exit saveVTK ***** ") |
MPIO_DEBUG(" ***** Exit saveVTK ***** ") |
1191 |
#undef __STRCAT |
#undef __STRCAT |