/[escript]/trunk/finley/src/Mesh_saveVTK.c
ViewVC logotype

Diff of /trunk/finley/src/Mesh_saveVTK.c

Parent Directory | Revision Log | View Patch

Diff between revision 2141 (by caltinay, Tue Dec 9 04:32:32 2008 UTC) and revision 2744 (by caltinay, Mon Nov 16 23:45:55 2009 UTC)
# Line 1  Line 1 
1    
2  /*******************************************************  /*******************************************************
3  *  *
4  * Copyright (c) 2003-2008 by University of Queensland  * Copyright (c) 2003-2009 by University of Queensland
5  * Earth Systems Science Computational Center (ESSCC)  * Earth Systems Science Computational Center (ESSCC)
6  * http://www.uq.edu.au/esscc  * http://www.uq.edu.au/esscc
7  *  *
# Line 22  Line 22 
22  #include "paso/PasoUtil.h"  #include "paso/PasoUtil.h"
23    
24  #define INT_FORMAT "%d "  #define INT_FORMAT "%d "
25  #define LEN_INT_FORMAT (9+1)  #define LEN_INT_FORMAT (unsigned int)(9+2)
26  #define INT_NEWLINE_FORMAT "%d\n"  #define INT_NEWLINE_FORMAT "%d\n"
27  #define SCALAR_FORMAT "%12.6e\n"  #define SCALAR_FORMAT "%12.6e\n"
28  #define VECTOR_FORMAT "%12.6e %12.6e %12.6e\n"  #define VECTOR_FORMAT "%12.6e %12.6e %12.6e\n"
29  #define TENSOR_FORMAT "%12.6e %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e\n"  #define TENSOR_FORMAT "%12.6e %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e %12.6e\n"
30  #define LEN_TENSOR_FORMAT (9*(12+1)+1)  /* strlen("-1.234567e+789 ") == 15 */
31    #define LEN_TENSOR_FORMAT (unsigned int)(9*15+2)
32  #define NEWLINE "\n"  #define NEWLINE "\n"
33  #define LEN_TMP_BUFFER LEN_TENSOR_FORMAT+(MAX_numNodes*LEN_INT_FORMAT+1)+2  #define LEN_TMP_BUFFER LEN_TENSOR_FORMAT+(MAX_numNodes*LEN_INT_FORMAT+1)+2
34  #define NCOMP_MAX 9  #define NCOMP_MAX (unsigned int)9
35    
36  #define __STRCAT(dest, chunk, dest_in_use) \  #define __STRCAT(dest, chunk, dest_in_use) \
37  do {\  do {\
# Line 39  do {\ Line 40  do {\
40  } while(0)  } while(0)
41    
42  #ifdef PASO_MPI  #ifdef PASO_MPI
   
43  /* writes buffer to file catching the empty buffer case which causes problems  /* writes buffer to file catching the empty buffer case which causes problems
44   * with some MPI versions */   * with some MPI versions */
45  #define MPI_WRITE_ORDERED(BUF, LEN) \  #define MPI_WRITE_ORDERED(BUF) \
46  do {\  do {\
47      if (LEN==0) { strcpy(BUF, " "); LEN=1; }\      int LLEN=0; \
48      MPI_File_write_ordered(mpi_fileHandle_p, BUF, LEN, MPI_CHAR, &mpi_status);\      LLEN=(int) strlen(BUF); \
49        if (LLEN==0) { strcpy(BUF, ""); LLEN=0; }\
50        MPI_File_write_ordered(mpi_fileHandle_p, BUF, LLEN, MPI_CHAR, &mpi_status);\
51  } while(0)  } while(0)
52    
53  /* writes buffer to file on master only */  /* writes buffer to file on master only */
54  #define MPI_RANK0_WRITE_SHARED(BUF) \  #define MPI_RANK0_WRITE_SHARED(BUF) \
55  do {\  do {\
56        int LLEN=0; \
57      if (my_mpi_rank == 0) {\      if (my_mpi_rank == 0) {\
58          MPI_File_iwrite_shared(mpi_fileHandle_p, BUF, strlen(BUF), MPI_CHAR, &mpi_req);\          LLEN=(int) strlen(BUF); \
59            if (LLEN==0) { strcpy(BUF,""); LLEN=0; }\
60            MPI_File_iwrite_shared(mpi_fileHandle_p, BUF, LLEN, MPI_CHAR, &mpi_req);\
61          MPI_Wait(&mpi_req, &mpi_status);\          MPI_Wait(&mpi_req, &mpi_status);\
62      }\      }\
63  } while(0)  } while(0)
# Line 75  void create_MPIInfo(MPI_Info& info) Line 80  void create_MPIInfo(MPI_Info& info)
80    
81  #else  #else
82    
83  #define MPI_WRITE_ORDERED(A, B)  #define MPI_WRITE_ORDERED(A)
84  #define MPI_RANK0_WRITE_SHARED(A)  #define MPI_RANK0_WRITE_SHARED(A)
85    
86  #endif /* PASO_MPI */  #endif /* PASO_MPI */
# Line 138  void Finley_Mesh_saveVTK(const char *fil Line 143  void Finley_Mesh_saveVTK(const char *fil
143                           Finley_Mesh *mesh_p,                           Finley_Mesh *mesh_p,
144                           const dim_t num_data,                           const dim_t num_data,
145                           char **names_p,                           char **names_p,
146                           escriptDataC **data_pp)                           escriptDataC **data_pp,
147                             const char* metadata,
148                             const char*metadata_schema)
149  {  {
150  #ifdef PASO_MPI  #ifdef PASO_MPI
151      MPI_File mpi_fileHandle_p;      MPI_File mpi_fileHandle_p;
# Line 167  void Finley_Mesh_saveVTK(const char *fil Line 174  void Finley_Mesh_saveVTK(const char *fil
174    
175      const char *vtkHeader = \      const char *vtkHeader = \
176        "<?xml version=\"1.0\"?>\n" \        "<?xml version=\"1.0\"?>\n" \
177        "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\">\n" \        "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\"%s%s>\n%s%s" \
178        "<UnstructuredGrid>\n" \        "<UnstructuredGrid>\n" \
179        "<Piece NumberOfPoints=\"%d\" NumberOfCells=\"%d\">\n" \        "<Piece NumberOfPoints=\"%d\" NumberOfCells=\"%d\">\n" \
180        "<Points>\n" \        "<Points>\n" \
# Line 205  void Finley_Mesh_saveVTK(const char *fil Line 212  void Finley_Mesh_saveVTK(const char *fil
212      my_mpi_rank = mesh_p->Nodes->MPIInfo->rank;      my_mpi_rank = mesh_p->Nodes->MPIInfo->rank;
213      mpi_size = mesh_p->Nodes->MPIInfo->size;      mpi_size = mesh_p->Nodes->MPIInfo->size;
214    
215      /************************************************************************/      /************************************************************************
216      /* open the file and check handle */       * open the file and check handle *
217         */
218      if (mpi_size > 1) {      if (mpi_size > 1) {
219  #ifdef PASO_MPI  #ifdef PASO_MPI
220          const int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;          const int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
# Line 221  void Finley_Mesh_saveVTK(const char *fil Line 228  void Finley_Mesh_saveVTK(const char *fil
228              sprintf(errorMsg, "saveVTK: File %s could not be opened for writing in parallel.", filename_p);              sprintf(errorMsg, "saveVTK: File %s could not be opened for writing in parallel.", filename_p);
229              Finley_setError(IO_ERROR, errorMsg);              Finley_setError(IO_ERROR, errorMsg);
230          } else {          } else {
231              MPI_File_set_view(mpi_fileHandle_p, MPI_DISPLACEMENT_CURRENT,              ierr=MPI_File_set_view(mpi_fileHandle_p,MPI_DISPLACEMENT_CURRENT,
232                      MPI_CHAR, MPI_CHAR, "native", mpi_info);                      MPI_CHAR, MPI_CHAR, "native", mpi_info);
233          }          }
234  #endif /* PASO_MPI */  #endif /* PASO_MPI */
# Line 485  void Finley_Mesh_saveVTK(const char *fil Line 492  void Finley_Mesh_saveVTK(const char *fil
492    
493      /* allocate enough memory for text buffer */      /* allocate enough memory for text buffer */
494    
495      txtBufferSize = strlen(vtkHeader) + 3*LEN_INT_FORMAT + (30+3*maxNameLen);      txtBufferSize = strlen(vtkHeader) + 3*LEN_INT_FORMAT + (30+3*maxNameLen)+strlen(metadata)+strlen(metadata_schema);
   
496      if (mpi_size > 1) {      if (mpi_size > 1) {
497          txtBufferSize = MAX(txtBufferSize, myNumPoints * LEN_TMP_BUFFER);          txtBufferSize = MAX(txtBufferSize, myNumPoints * LEN_TMP_BUFFER);
498          txtBufferSize = MAX(txtBufferSize, numCellFactor * myNumCells *          txtBufferSize = MAX(txtBufferSize, numCellFactor * myNumCells *
# Line 519  void Finley_Mesh_saveVTK(const char *fil Line 525  void Finley_Mesh_saveVTK(const char *fil
525              nodeIndex = NULL;              nodeIndex = NULL;
526          }          }
527    
528          sprintf(txtBuffer, vtkHeader, globalNumPoints,          if (strlen(metadata)>0) {
529                  numCellFactor*globalNumCells, 3);             if (strlen(metadata_schema)>0) {
530                  sprintf(txtBuffer, vtkHeader," ",metadata_schema,metadata,"\n",globalNumPoints, numCellFactor*globalNumCells, 3);
531               } else {
532                  sprintf(txtBuffer, vtkHeader,"","",metadata,"\n",globalNumPoints, numCellFactor*globalNumCells, 3);
533               }
534            } else {
535               if (strlen(metadata_schema)>0) {
536                  sprintf(txtBuffer, vtkHeader," ",metadata_schema,"","",globalNumPoints, numCellFactor*globalNumCells, 3);
537               } else {
538                  sprintf(txtBuffer, vtkHeader,"","","","",globalNumPoints, numCellFactor*globalNumCells, 3);
539               }
540            }
541    
542          if (mpi_size > 1) {          if (mpi_size > 1) {
543              /* write the nodes */              /* write the nodes */
# Line 548  void Finley_Mesh_saveVTK(const char *fil Line 565  void Finley_Mesh_saveVTK(const char *fil
565                      }                      }
566                  }                  }
567              } /* nDim */              } /* nDim */
568              MPI_WRITE_ORDERED(txtBuffer, txtBufferInUse);              MPI_WRITE_ORDERED(txtBuffer);
569    
570              /* write the cells */              /* write the cells */
571              MPI_RANK0_WRITE_SHARED(tags_End_Points_and_Start_Conn);              MPI_RANK0_WRITE_SHARED(tags_End_Points_and_Start_Conn);
# Line 578  void Finley_Mesh_saveVTK(const char *fil Line 595  void Finley_Mesh_saveVTK(const char *fil
595                      }                      }
596                  }                  }
597              } /* nodeIndex */              } /* nodeIndex */
598              MPI_WRITE_ORDERED(txtBuffer, txtBufferInUse);              MPI_WRITE_ORDERED(txtBuffer);
599    
600              /* write the offsets */              /* write the offsets */
601              MPI_RANK0_WRITE_SHARED(tags_End_Conn_and_Start_Offset);              MPI_RANK0_WRITE_SHARED(tags_End_Conn_and_Start_Offset);
# Line 590  void Finley_Mesh_saveVTK(const char *fil Line 607  void Finley_Mesh_saveVTK(const char *fil
607                  sprintf(tmpBuffer, INT_NEWLINE_FORMAT, i);                  sprintf(tmpBuffer, INT_NEWLINE_FORMAT, i);
608                  __STRCAT(txtBuffer, tmpBuffer, txtBufferInUse);                  __STRCAT(txtBuffer, tmpBuffer, txtBufferInUse);
609              }              }
610              MPI_WRITE_ORDERED(txtBuffer, txtBufferInUse);              MPI_WRITE_ORDERED(txtBuffer);
611    
612              /* write element type */              /* write element type */
613              sprintf(tmpBuffer, INT_NEWLINE_FORMAT, cellType);              sprintf(tmpBuffer, INT_NEWLINE_FORMAT, cellType);
# Line 602  void Finley_Mesh_saveVTK(const char *fil Line 619  void Finley_Mesh_saveVTK(const char *fil
619              {              {
620                  __STRCAT(txtBuffer, tmpBuffer, txtBufferInUse);                  __STRCAT(txtBuffer, tmpBuffer, txtBufferInUse);
621              }              }
622              MPI_WRITE_ORDERED(txtBuffer, txtBufferInUse);              MPI_WRITE_ORDERED(txtBuffer);
623              /* finalize cell information */              /* finalize cell information */
624              strcpy(txtBuffer, "</DataArray>\n</Cells>\n");              strcpy(txtBuffer, "</DataArray>\n</Cells>\n");
625              MPI_RANK0_WRITE_SHARED(txtBuffer);              MPI_RANK0_WRITE_SHARED(txtBuffer);
# Line 762  void Finley_Mesh_saveVTK(const char *fil Line 779  void Finley_Mesh_saveVTK(const char *fil
779                  txtBufferInUse = 0;                  txtBufferInUse = 0;
780                  for (i=0; i<numCells; i++) {                  for (i=0; i<numCells; i++) {
781                      if (elements->Owner[i] == my_mpi_rank) {                      if (elements->Owner[i] == my_mpi_rank) {
782                          double *values = getSampleData(data_pp[dataIdx], i);                          void* sampleBuffer=allocSampleBuffer(data_pp[dataIdx]);
783                            __const double *values = getSampleDataRO(data_pp[dataIdx], i,sampleBuffer);
784                          for (l = 0; l < numCellFactor; l++) {                          for (l = 0; l < numCellFactor; l++) {
785                              double sampleAvg[NCOMP_MAX];                              double sampleAvg[NCOMP_MAX];
786                              dim_t nCompUsed = MIN(nComp, NCOMP_MAX);                              dim_t nCompUsed = MIN(nComp, NCOMP_MAX);
# Line 828  void Finley_Mesh_saveVTK(const char *fil Line 846  void Finley_Mesh_saveVTK(const char *fil
846                                  fputs(tmpBuffer, fileHandle_p);                                  fputs(tmpBuffer, fileHandle_p);
847                              }                              }
848                          } /* for l (numCellFactor) */                          } /* for l (numCellFactor) */
849                            freeSampleBuffer(sampleBuffer);
850                      } /* if I am the owner */                      } /* if I am the owner */
851                  } /* for i (numCells) */                  } /* for i (numCells) */
852    
853                  if ( mpi_size > 1) {                  if ( mpi_size > 1) {
854                      MPI_WRITE_ORDERED(txtBuffer, txtBufferInUse);                      MPI_WRITE_ORDERED(txtBuffer);
855                      MPI_RANK0_WRITE_SHARED(tag_End_DataArray);                      MPI_RANK0_WRITE_SHARED(tag_End_DataArray);
856                  } else {                  } else {
857                      fputs(tag_End_DataArray, fileHandle_p);                      fputs(tag_End_DataArray, fileHandle_p);
# Line 943  void Finley_Mesh_saveVTK(const char *fil Line 962  void Finley_Mesh_saveVTK(const char *fil
962                  for (i=0; i<mesh_p->Nodes->numNodes; i++) {                  for (i=0; i<mesh_p->Nodes->numNodes; i++) {
963                      k = globalNodeIndex[i];                      k = globalNodeIndex[i];
964                      if ( (myFirstNode <= k) && (k < myLastNode) ) {                      if ( (myFirstNode <= k) && (k < myLastNode) ) {
965                          double *values = getSampleData(data_pp[dataIdx], nodeMapping->target[i]);                          void* sampleBuffer=allocSampleBuffer(data_pp[dataIdx]);
966                            __const double *values = getSampleDataRO(data_pp[dataIdx], nodeMapping->target[i], sampleBuffer);
967                          /* if the number of mpi_required components is more than                          /* if the number of mpi_required components is more than
968                           * the number of actual components, pad with zeros.                           * the number of actual components, pad with zeros.
969                           * Probably only need to get shape of first element */                           * Probably only need to get shape of first element */
# Line 983  void Finley_Mesh_saveVTK(const char *fil Line 1003  void Finley_Mesh_saveVTK(const char *fil
1003                          } else {                          } else {
1004                              fputs(tmpBuffer, fileHandle_p);                              fputs(tmpBuffer, fileHandle_p);
1005                          }                          }
1006                            freeSampleBuffer(sampleBuffer);                 /* no-one needs values anymore */
1007                      } /* if this is my node */                      } /* if this is my node */
1008                  } /* for i (numNodes) */                  } /* for i (numNodes) */
1009    
1010                  if ( mpi_size > 1) {                  if ( mpi_size > 1) {
1011                      MPI_WRITE_ORDERED(txtBuffer, txtBufferInUse);                      MPI_WRITE_ORDERED(txtBuffer);
1012                      MPI_RANK0_WRITE_SHARED(tag_End_DataArray);                      MPI_RANK0_WRITE_SHARED(tag_End_DataArray);
1013                  } else {                  } else {
1014                      fputs(tag_End_DataArray, fileHandle_p);                      fputs(tag_End_DataArray, fileHandle_p);
# Line 1015  void Finley_Mesh_saveVTK(const char *fil Line 1036  void Finley_Mesh_saveVTK(const char *fil
1036      if ( mpi_size > 1) {      if ( mpi_size > 1) {
1037  #ifdef PASO_MPI  #ifdef PASO_MPI
1038          MPI_File_close(&mpi_fileHandle_p);          MPI_File_close(&mpi_fileHandle_p);
1039            MPI_Barrier(mesh_p->Nodes->MPIInfo->comm);
1040  #endif  #endif
1041      } else {      } else {
1042          fclose(fileHandle_p);          fclose(fileHandle_p);

Legend:
  Left column:  lines removed from v.2141
  Right column: lines added in v.2744

  ViewVC Help
Powered by ViewVC 1.1.26