/[escript]/trunk/dataexporter/src/EscriptDataset.cpp
ViewVC logotype

Contents of /trunk/dataexporter/src/EscriptDataset.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2886 - (show annotations)
Thu Jan 28 05:39:23 2010 UTC (9 years, 6 months ago) by caltinay
File size: 28677 byte(s)
New version of saveVTK within dataexporter. Not pythonised yet since it needs
more testing and release 3.1 will not ship dataexporter.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2010 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
#include <escriptexport/EscriptDataset.h>
#include <escriptexport/DataVar.h>
#include <escriptexport/ElementData.h>
#include <escriptexport/FinleyMesh.h>
#include <escriptexport/NodeData.h>
#include <escript/Data.h>

#include <cstdio>   // for snprintf
#include <fstream>
#include <iostream>
#include <numeric>  // for std::accumulate
#include <sstream>
#include <string>
#include <vector>

#if USE_SILO
#include <silo.h>

#if HAVE_MPI
#include <pmpio.h>
#endif

#endif // USE_SILO
32
33 using namespace std;
34
35 namespace escriptexport {
36
// Subdirectory in the output file that holds variables derived from the
// mesh itself (as opposed to user-supplied data variables).
const char* MESH_VARS = "mesh_vars/";
// Number of separate Silo files the PMPIO baton distributes blocks over.
const int NUM_SILO_FILES = 1;
39
//
// Default constructor
//
// Sets up an empty dataset for serial use: one process (rank 0 of 1),
// no mesh blocks and no variables.
// NOTE(review): mpiComm is not initialized here -- presumably it is only
// touched when mpiSize > 1; confirm against the class declaration.
EscriptDataset::EscriptDataset() :
    numParts(0),
    cycle(0),
    time(0.),
    keepMesh(false),
    externalMesh(false),
    mpiRank(0),
    mpiSize(1)
{
}
53
//
// Constructor with communicator
//
// Sets up an empty dataset that uses the given MPI communicator for all
// collective operations; rank and size are queried from the communicator.
#if HAVE_MPI
EscriptDataset::EscriptDataset(MPI_Comm comm) :
    numParts(0),
    cycle(0),
    time(0.),
    keepMesh(false),
    externalMesh(false),
    mpiComm(comm)
{
    MPI_Comm_rank(mpiComm, &mpiRank);
    MPI_Comm_size(mpiComm, &mpiSize);
}
#endif
70
//
// Destructor
//
// All members clean up after themselves (smart pointers and standard
// containers), so there is nothing to release explicitly.
EscriptDataset::~EscriptDataset()
{
}
77
78 //
79 //
80 //
81 bool EscriptDataset::initFromEscript(escript::const_Domain_ptr escriptDomain,
82 DataVec& escriptVars,
83 const StringVec& varNames)
84 {
85 int myError;
86 numParts = 1;
87 FinleyMesh_ptr mesh(new FinleyMesh());
88 if (mesh->initFromEscript(escriptDomain)) {
89 if (mpiSize > 1)
90 mesh->reorderGhostZones(mpiRank);
91 meshBlocks.push_back(mesh);
92 myError = false;
93 } else {
94 mesh.reset();
95 myError = true;
96 }
97
98 int gError;
99 #if HAVE_MPI
100 MPI_Allreduce(&myError, &gError, 1, MPI_LOGICAL, MPI_LOR, mpiComm);
101 #else
102 gError = myError;
103 #endif
104
105 if (!gError) {
106 // initialize variables
107 DataVec::iterator varIt = escriptVars.begin();
108 StringVec::const_iterator nameIt = varNames.begin();
109 for (; varIt != escriptVars.end(); varIt++, nameIt++) {
110 VarInfo vi;
111 vi.varName = *nameIt;
112
113 DataVar_ptr var(new DataVar(vi.varName));
114 if (var->initFromEscript(*varIt, mesh)) {
115 vi.dataBlocks.push_back(var);
116 updateSampleDistribution(vi);
117 vi.valid = true;
118 } else {
119 var.reset();
120 vi.valid = false;
121 }
122 variables.push_back(vi);
123 }
124
125 // Convert mesh data to variables
126 convertMeshVariables();
127 }
128
129 return !gError;
130 }
131
132 //
133 //
134 //
135 bool EscriptDataset::loadNetCDF(const string meshFilePattern,
136 const StringVec& varFiles,
137 const StringVec& varNames, int nBlocks)
138 {
139 if (mpiSize > 1 && nBlocks != mpiSize) {
140 cerr << "Cannot load " << nBlocks << " chunks on " << mpiSize
141 << " MPI ranks!" << endl;
142 return false;
143 }
144
145 numParts = nBlocks;
146 meshFmt = meshFilePattern;
147
148 // Load the mesh files
149 if (!loadMeshFromNetCDF()) {
150 cerr << "Reading the domain failed." << endl;
151 return false;
152 }
153
154 // Convert mesh data to variables
155 convertMeshVariables();
156
157 // Load the variables
158 StringVec::const_iterator fileIt = varFiles.begin();
159 StringVec::const_iterator nameIt = varNames.begin();
160 for (; fileIt != varFiles.end(); fileIt++, nameIt++) {
161 loadVarFromNetCDF(*fileIt, *nameIt);
162 }
163
164 return true;
165 }
166
167 //
168 // Load only variables using provided mesh
169 //
170 bool EscriptDataset::loadNetCDF(const MeshBlocks& mesh,
171 const StringVec& varFiles,
172 const StringVec& varNames)
173 {
174 if (mpiSize > 1 && mesh.size() > 1) {
175 cerr << "Can only read one mesh block per rank when using MPI!" << endl;
176 return false;
177 }
178
179 externalMesh = true;
180 meshBlocks = mesh;
181 numParts = meshBlocks.size();
182 keepMesh = true;
183
184 // Load the variables
185 StringVec::const_iterator fileIt = varFiles.begin();
186 StringVec::const_iterator nameIt = varNames.begin();
187 for (; fileIt != varFiles.end(); fileIt++, nameIt++) {
188 loadVarFromNetCDF(*fileIt, *nameIt);
189 }
190
191 return true;
192 }
193
//
// Writes all mesh blocks and variables to a Silo file. Under MPI the
// ranks share the file via PMPIO baton passing (each rank writes its
// block into its own directory); rank 0 additionally writes the
// multi-mesh / multi-var objects that stitch the blocks together.
// Returns false when Silo support is compiled out, there is nothing to
// write, or the file cannot be created.
//
bool EscriptDataset::saveSilo(string fileName, bool useMultiMesh)
{
#if USE_SILO
    if (numParts == 0)
        return false;

    const char* blockDirFmt = "/block%04d";
    string siloPath;
    DBfile* dbfile = NULL;
#if HAVE_MPI
    PMPIO_baton_t* baton = NULL;
#endif

    if (mpiSize > 1) {
#if HAVE_MPI
        // PMPIO serializes access to the shared file: each rank waits for
        // the baton, writes, and hands the open file handle on.
        baton = PMPIO_Init(NUM_SILO_FILES, PMPIO_WRITE,
                    mpiComm, 0x1337, PMPIO_DefaultCreate, PMPIO_DefaultOpen,
                    PMPIO_DefaultClose, NULL);
        if (baton) {
            char str[64];
            snprintf(str, 64, blockDirFmt, PMPIO_RankInGroup(baton, mpiRank));
            siloPath = str;
            dbfile = (DBfile*) PMPIO_WaitForBaton(
                    baton, fileName.c_str(), siloPath.c_str());
        }
#endif
    } else {
        dbfile = DBCreate(fileName.c_str(), DB_CLOBBER, DB_LOCAL,
                "escriptData", DB_PDB);
    }

    if (!dbfile) {
        cerr << "Could not create Silo file." << endl;
        return false;
    }

    MeshBlocks::iterator meshIt;
    VarVector::iterator viIt;
    int idx = 0;
    // serial: iterate over all blocks; MPI: each rank holds one block
    for (meshIt = meshBlocks.begin(); meshIt != meshBlocks.end(); meshIt++, idx++) {
        if (mpiSize == 1) {
            // in serial mode every block gets its own subdirectory
            char str[64];
            snprintf(str, 64, blockDirFmt, idx);
            siloPath = str;
            DBMkdir(dbfile, siloPath.c_str());
        }
        // write block of the mesh if we don't use an external mesh
        if (!externalMesh) {
            if (! (*meshIt)->writeToSilo(dbfile, siloPath)) {
                cerr << "Error writing block " << idx << " of mesh to Silo file!\n";
                break;
            }
        }

        // write variables for current mesh block
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            // do not attempt to write this variable if previous steps failed
            if (!(*viIt).valid) continue;
            DataVar_ptr var = (*viIt).dataBlocks[idx];
            if (!var->writeToSilo(dbfile, siloPath)) {
                cerr << "Error writing block " << idx << " of '"
                    << var->getName() << "' to Silo file!" << endl;
                (*viIt).valid = false;
            }
        }
    }

    // rank 0 writes additional data that describe how the parts fit together
    if (mpiRank == 0) {
        if (useMultiMesh) {
            const StringVec& meshNames = meshBlocks[0]->getMeshNames();
            StringVec::const_iterator it;
            for (it = meshNames.begin(); it != meshNames.end(); it++)
                putSiloMultiMesh(dbfile, *it);

            DBMkdir(dbfile, MESH_VARS);
            for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++)
                putSiloMultiVar(dbfile, *viIt, true);

            // rank-0/1 variables become multi-vars, rank-2 become tensors
            for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
                if (!(*viIt).valid) continue;
                DataVar_ptr var = (*viIt).dataBlocks[0];
                if (var->getRank() < 2)
                    putSiloMultiVar(dbfile, *viIt);
                else
                    putSiloMultiTensor(dbfile, *viIt);
            }
        }

        vector<char*> tensorNames;
        vector<string> tensorDefStrings;
        vector<char*> tensorDefs;

        // collect tensors for their Silo definitions
        // NOTE(review): tensorDefs stores c_str() pointers into
        // tensorDefStrings; a later push_back may reallocate the vector
        // and invalidate those pointers. tensorNames stores
        // var->getName().c_str() directly -- if getName() returns by
        // value these pointers dangle immediately. Confirm both.
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            if (!(*viIt).valid) continue;
            DataVar_ptr var = (*viIt).dataBlocks[0];
            if (var->getRank() == 2) {
                tensorDefStrings.push_back(var->getTensorDef());
                tensorDefs.push_back((char*)tensorDefStrings.back().c_str());
                tensorNames.push_back((char*)var->getName().c_str());
            }
        }

        if (tensorDefs.size()) {
            // register the tensor expressions so visualization tools can
            // reassemble the components written by putSiloMultiTensor()
            DBSetDir(dbfile, "/");
            DBoptlist* optList = DBMakeOptlist(2);
            DBAddOption(optList, DBOPT_CYCLE, &cycle);
            DBAddOption(optList, DBOPT_DTIME, &time);
            vector<DBoptlist*> defOpts(tensorDefs.size(), optList);
            vector<int> defTypes(tensorDefs.size(), DB_VARTYPE_TENSOR);
            DBPutDefvars(dbfile, "tensors", tensorDefs.size(), &tensorNames[0],
                    &defTypes[0], &tensorDefs[0], &defOpts[0]);
            DBFreeOptlist(optList);
        }
    }

    if (mpiSize > 1) {
#if HAVE_MPI
        // pass the file on to the next rank, then wait for everyone
        PMPIO_HandOffBaton(baton, dbfile);
        PMPIO_Finish(baton);
#endif
    } else {
        DBClose(dbfile);
    }

    return true;

#else // !USE_SILO
    return false;
#endif
}
329
330 //
331 //
332 //
333 bool EscriptDataset::saveVTK(string fileName)
334 {
335 if (numParts == 0)
336 return false;
337
338 string meshName;
339
340 // determine mesh type and variables to write
341 VarVector nodalVars, cellVars;
342 VarVector::iterator viIt;
343 for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
344 const DataBlocks& varBlocks = viIt->dataBlocks;
345 // skip empty variable
346 int numSamples = accumulate(viIt->sampleDistribution.begin(),
347 viIt->sampleDistribution.end(), 0);
348 if (numSamples == 0 || !viIt->valid) {
349 continue;
350 }
351 string tmpName = varBlocks[0]->getMeshName();
352 if (meshName != "") {
353 if (meshName != tmpName) {
354 cerr << "VTK supports only one mesh! Skipping variable "
355 << varBlocks[0]->getName() << " on " << tmpName << endl;
356 continue;
357 }
358 } else {
359 meshName = tmpName;
360 }
361
362 if (varBlocks[0]->isNodeCentered()) {
363 nodalVars.push_back(*viIt);
364 } else {
365 cellVars.push_back(*viIt);
366 }
367 }
368
369 // no valid variables so use default mesh
370 if (meshName == "")
371 meshName = "Elements";
372
373 // add mesh variables
374 for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++) {
375 DataVar_ptr var = viIt->dataBlocks[0];
376 if (meshName == var->getMeshName()) {
377 VarInfo vi = *viIt;
378 vi.varName = string(MESH_VARS)+vi.varName;
379 if (var->isNodeCentered()) {
380 nodalVars.push_back(vi);
381 } else {
382 cellVars.push_back(vi);
383 }
384 }
385 }
386
387 int gNumPoints;
388 int gNumCells = 0;
389 int gCellSizeAndType[2] = { 0, 0 };
390
391 #if HAVE_MPI
392 // remove file first if it exists
393 int error = 0;
394 int mpiErr;
395 if (mpiRank == 0) {
396 ifstream f(fileName.c_str());
397 if (f.is_open()) {
398 f.close();
399 if (remove(fileName.c_str())) {
400 error=1;
401 }
402 }
403 }
404 MPI_Allreduce(&error, &mpiErr, 1, MPI_INT, MPI_MAX, mpiComm);
405 if (mpiErr != 0) {
406 cerr << "Error removing " << fileName << "!" << endl;
407 return false;
408 }
409
410 MPI_File mpiFileHandle;
411 MPI_Info mpiInfo = MPI_INFO_NULL;
412 int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
413 mpiErr = MPI_File_open(mpiComm, const_cast<char*>(fileName.c_str()),
414 amode, mpiInfo, &mpiFileHandle);
415 if (mpiErr == MPI_SUCCESS) {
416 mpiErr = MPI_File_set_view(mpiFileHandle, 0, MPI_CHAR, MPI_CHAR,
417 const_cast<char*>("native"), mpiInfo);
418 }
419 if (mpiErr != MPI_SUCCESS) {
420 cerr << "Error opening " << fileName << " for parallel writing!" << endl;
421 return false;
422 }
423
424 int myNumCells;
425 if (mpiSize > 1)
426 meshBlocks[0]->removeGhostZones(mpiRank);
427 ElementData_ptr elements = meshBlocks[0]->getElementsByName(meshName);
428 myNumCells = elements->getNumElements();
429 MPI_Reduce(&myNumCells, &gNumCells, 1, MPI_INT, MPI_SUM, 0, mpiComm);
430
431 int myCellSizeAndType[2];
432 myCellSizeAndType[0] = elements->getNodesPerElement();
433 myCellSizeAndType[1] = elements->getType();
434
435 // rank 0 needs to know element type and size but it's possible that this
436 // information is only available on other ranks (value=0) so retrieve it
437 MPI_Reduce(&myCellSizeAndType, &gCellSizeAndType, 2, MPI_INT, MPI_MAX, 0,
438 mpiComm);
439
440 // these definitions make the code more readable and life easier -
441 // WRITE_ORDERED causes all ranks to write the contents of the stream while
442 // WRITE_SHARED should only be called for rank 0.
443 #define WRITE_ORDERED(_stream) \
444 do {\
445 MPI_Status mpiStatus;\
446 string contents = _stream.str();\
447 mpiErr = MPI_File_write_ordered(\
448 mpiFileHandle, const_cast<char*>(contents.c_str()),\
449 contents.length(), MPI_CHAR, &mpiStatus);\
450 _stream.str("");\
451 } while(0)
452
453 #define WRITE_SHARED(_stream) \
454 do {\
455 MPI_Status mpiStatus;\
456 string contents = _stream.str();\
457 mpiErr = MPI_File_write_shared(\
458 mpiFileHandle, const_cast<char*>(contents.c_str()),\
459 contents.length(), MPI_CHAR, &mpiStatus);\
460 _stream.str("");\
461 } while(0)
462
463 #else /*********** non-MPI version follows ************/
464
465 ofstream ofs(fileName.c_str());
466
467 MeshBlocks::iterator meshIt;
468 int idx = 0;
469 for (meshIt = meshBlocks.begin(); meshIt != meshBlocks.end(); meshIt++, idx++) {
470 if (numParts > 1)
471 (*meshIt)->removeGhostZones(idx);
472 ElementData_ptr elements = (*meshIt)->getElementsByName(meshName);
473 gNumCells += elements->getNumElements();
474 if (gCellSizeAndType[0] == 0)
475 gCellSizeAndType[0] = elements->getNodesPerElement();
476 if (gCellSizeAndType[1] == 0)
477 gCellSizeAndType[1] = elements->getType();
478 }
479
480 // the non-MPI versions of WRITE_ORDERED and WRITE_SHARED are
481 // identical.
482 #define WRITE_ORDERED(_stream) \
483 do {\
484 ofs << _stream.str();\
485 _stream.str("");\
486 } while(0)
487
488 #define WRITE_SHARED(_stream) \
489 do {\
490 ofs << _stream.str();\
491 _stream.str("");\
492 } while(0)
493
494 #endif
495
496 gNumPoints = meshBlocks[0]->getNodes()->getGlobalNumNodes();
497
498 ostringstream oss;
499 oss.setf(ios_base::scientific, ios_base::floatfield);
500
501 if (mpiRank == 0) {
502 #ifdef _DEBUG
503 cout << meshName << ": pts=" << gNumPoints << ", cells=" << gNumCells
504 << ", cellsize=" << gCellSizeAndType[0] << ", type="
505 << gCellSizeAndType[1] << ", numNodalVars=" << nodalVars.size()
506 << ", numCellVars=" << cellVars.size()
507 << endl;
508 #endif
509
510 oss << "<?xml version=\"1.0\"?>" << endl;
511 oss << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\">" << endl;
512 oss << "<UnstructuredGrid>" << endl;
513
514 // write time and cycle values
515 oss << "<FieldData>" << endl;
516 oss << "<DataArray Name=\"TIME\" type=\"Float64\" format=\"ascii\" NumberOfTuples=\"1\">" << endl;
517 oss << time << endl;
518 oss << "</DataArray>" << endl << "</FieldData>" << endl;
519 oss << "<FieldData>" << endl;
520 oss << "<DataArray Name=\"CYCLE\" type=\"Int32\" format=\"ascii\" NumberOfTuples=\"1\">" << endl;
521 oss << cycle << endl;
522 oss << "</DataArray>" << endl << "</FieldData>" << endl;
523
524 // write mesh header
525 oss << "<Piece NumberOfPoints=\"" << gNumPoints
526 << "\" NumberOfCells=\"" << gNumCells << "\">" << endl;
527 oss << "<Points>" << endl;
528 oss << "<DataArray NumberOfComponents=\"3\" type=\"Float64\" format=\"ascii\">" << endl;
529 }
530
531 //
532 // coordinates (note that we are using the original nodes)
533 //
534 for (meshIt = meshBlocks.begin(); meshIt != meshBlocks.end(); meshIt++) {
535 (*meshIt)->getNodes()->writeCoordinatesVTK(oss, mpiRank);
536 }
537
538 WRITE_ORDERED(oss);
539
540 if (mpiRank == 0) {
541 oss << "</DataArray>" << endl << "</Points>" << endl;
542 oss << "<Cells>" << endl;
543 oss << "<DataArray Name=\"connectivity\" type=\"Int32\" format=\"ascii\">" << endl;
544 }
545
546 //
547 // connectivity
548 //
549 for (meshIt = meshBlocks.begin(); meshIt != meshBlocks.end(); meshIt++) {
550 ElementData_ptr el = (*meshIt)->getElementsByName(meshName);
551 el->writeConnectivityVTK(oss);
552 }
553
554 WRITE_ORDERED(oss);
555
556 //
557 // offsets & types
558 //
559 if (mpiRank == 0) {
560 oss << "</DataArray>" << endl;
561 oss << "<DataArray Name=\"offsets\" type=\"Int32\" format=\"ascii\">" << endl;
562 for (int i=1; i < gNumCells+1; i++) {
563 oss << i*gCellSizeAndType[0] << endl;
564 }
565 oss << "</DataArray>" << endl;
566 oss << "<DataArray Name=\"types\" type=\"UInt8\" format=\"ascii\">" << endl;
567 for (int i=1; i < gNumCells+1; i++) {
568 oss << gCellSizeAndType[1] << endl;
569 }
570 oss << "</DataArray>" << endl << "</Cells>" << endl;
571 WRITE_SHARED(oss);
572 }
573
574 // now write all variables - first the nodal data, then cell data
575
576 // write nodal data if any
577 if (nodalVars.size() > 0) {
578 if (mpiRank == 0)
579 oss << "<PointData>" << endl;
580 for (viIt = nodalVars.begin(); viIt != nodalVars.end(); viIt++) {
581 writeVarToVTK(*viIt, oss);
582 WRITE_ORDERED(oss);
583 if (mpiRank == 0)
584 oss << "</DataArray>" << endl;
585 }
586 if (mpiRank == 0)
587 oss << "</PointData>" << endl;
588 }
589
590 // write cell data if any
591 if (cellVars.size() > 0) {
592 if (mpiRank == 0)
593 oss << "<CellData>" << endl;
594 for (viIt = cellVars.begin(); viIt != cellVars.end(); viIt++) {
595 writeVarToVTK(*viIt, oss);
596 WRITE_ORDERED(oss);
597 if (mpiRank == 0)
598 oss << "</DataArray>" << endl;
599 }
600 if (mpiRank == 0)
601 oss << "</CellData>" << endl;
602 }
603
604 if (mpiRank == 0) {
605 oss << "</Piece>" << endl << "</UnstructuredGrid>" << endl
606 << "</VTKFile>" << endl;
607 WRITE_SHARED(oss);
608 }
609
610 #if HAVE_MPI
611 mpiErr = MPI_File_close(&mpiFileHandle);
612 #else
613 ofs.close();
614 #endif
615
616 return true;
617 }
618
619 //
620 //
621 //
622 void EscriptDataset::writeVarToVTK(const VarInfo& varInfo, ostream& os)
623 {
624 const DataBlocks& varBlocks = varInfo.dataBlocks;
625 int rank = varBlocks[0]->getRank();
626 int numComps = 1;
627 if (rank > 0)
628 numComps *= 3;
629 if (rank > 1)
630 numComps *= 3;
631
632 if (mpiRank == 0) {
633 os << "<DataArray Name=\"" << varInfo.varName
634 << "\" type=\"Float64\" NumberOfComponents=\"" << numComps
635 << "\" format=\"ascii\">" << endl;
636 }
637
638 DataBlocks::const_iterator blockIt;
639 for (blockIt = varBlocks.begin(); blockIt != varBlocks.end(); blockIt++) {
640 (*blockIt)->writeToVTK(os, mpiRank);
641 }
642 }
643
644 //
645 //
646 //
647 bool EscriptDataset::loadMeshFromNetCDF()
648 {
649 bool ok = true;
650 char* str = new char[meshFmt.length()+10];
651 if (mpiSize > 1) {
652 FinleyMesh_ptr meshPart(new FinleyMesh());
653 sprintf(str, meshFmt.c_str(), mpiRank);
654 string meshfile = str;
655 if (meshPart->initFromNetCDF(meshfile)) {
656 meshPart->reorderGhostZones(mpiRank);
657 meshBlocks.push_back(meshPart);
658 } else {
659 meshPart.reset();
660 ok = false;
661 }
662 } else {
663 for (int idx=0; idx < numParts; idx++) {
664 FinleyMesh_ptr meshPart(new FinleyMesh());
665 sprintf(str, meshFmt.c_str(), idx);
666 string meshfile = str;
667 if (meshPart->initFromNetCDF(meshfile)) {
668 if (numParts > 1)
669 meshPart->reorderGhostZones(idx);
670 meshBlocks.push_back(meshPart);
671 } else {
672 meshPart.reset();
673 ok = false;
674 break;
675 }
676 }
677 }
678 delete[] str;
679 return ok;
680 }
681
682 //
683 //
684 //
685 bool EscriptDataset::loadVarFromNetCDF(const string& fileName,
686 const string& varName)
687 {
688 int myError = false;
689 char* str = new char[fileName.length()+10];
690 VarInfo vi;
691
692 vi.varName = varName;
693 vi.valid = true;
694
695 // read all parts of the variable
696 MeshBlocks::iterator mIt;
697 int idx = (mpiSize > 1) ? mpiRank : 0;
698 for (mIt = meshBlocks.begin(); mIt != meshBlocks.end(); mIt++, idx++) {
699 sprintf(str, fileName.c_str(), idx);
700 string dfile = str;
701 DataVar_ptr var(new DataVar(varName));
702 if (var->initFromNetCDF(dfile, *mIt))
703 vi.dataBlocks.push_back(var);
704 else {
705 cerr << "Error reading " << dfile << endl;
706 var.reset();
707 myError = true;
708 break;
709 }
710 }
711 delete[] str;
712
713 int gError;
714 #if HAVE_MPI
715 MPI_Allreduce(&myError, &gError, 1, MPI_LOGICAL, MPI_LOR, mpiComm);
716 #else
717 gError = myError;
718 #endif
719
720 if (gError) {
721 // at least one chunk was not read correctly
722 vi.dataBlocks.clear();
723 vi.valid = false;
724 }
725
726 updateSampleDistribution(vi);
727 variables.push_back(vi);
728 return vi.valid;
729 }
730
731 //
732 //
733 //
734 void EscriptDataset::convertMeshVariables()
735 {
736 const StringVec& varNames = meshBlocks[0]->getVarNames();
737 StringVec::const_iterator it;
738 for (it = varNames.begin(); it != varNames.end(); it++) {
739 VarInfo vi;
740 vi.varName = *it;
741 vi.valid = true;
742 // get all parts of current variable
743 MeshBlocks::iterator mIt;
744 for (mIt = meshBlocks.begin(); mIt != meshBlocks.end(); mIt++) {
745 DataVar_ptr var(new DataVar(*it));
746 if (var->initFromMesh(*mIt)) {
747 vi.dataBlocks.push_back(var);
748 } else {
749 cerr << "Error converting mesh variable " << *it << endl;
750 vi.valid = false;
751 break;
752 }
753 }
754 updateSampleDistribution(vi);
755 meshVariables.push_back(vi);
756 }
757 }
758
759 // retrieves the number of samples at each block - used to determine which
760 // blocks contribute to given variable if any.
761 void EscriptDataset::updateSampleDistribution(VarInfo& vi)
762 {
763 IntVec sampleDist;
764 const DataBlocks& varBlocks = vi.dataBlocks;
765
766 if (mpiSize > 1) {
767 #if HAVE_MPI
768 int myNumSamples = varBlocks[0]->getNumberOfSamples();
769 sampleDist.insert(sampleDist.end(), mpiSize, 0);
770 MPI_Allgather(
771 &myNumSamples, 1, MPI_INT, &sampleDist[0], 1, MPI_INT, mpiComm);
772 #endif
773 } else {
774 DataBlocks::const_iterator it;
775 for (it = varBlocks.begin(); it != varBlocks.end(); it++) {
776 sampleDist.push_back((*it)->getNumberOfSamples());
777 }
778 }
779 vi.sampleDistribution = sampleDist;
780 }
781
782 //
783 //
784 //
785 void EscriptDataset::putSiloMultiMesh(DBfile* dbfile, const string& meshName)
786 {
787 #if USE_SILO
788 vector<int> meshtypes;
789 vector<string> tempstrings;
790 vector<char*> meshnames;
791 string pathPrefix;
792
793 int ppIndex = meshBlocks[0]->getSiloPath().find(':');
794 if (ppIndex != string::npos) {
795 pathPrefix = meshBlocks[0]->getSiloPath().substr(0, ppIndex+1);
796 }
797
798 // find a variable belonging to this mesh to get the sample
799 // distribution (which tells us which ranks contribute to this mesh).
800 // Try mesh variables first, then regular ones.
801 VarVector::const_iterator viIt;
802 for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++) {
803 if (meshName == viIt->dataBlocks[0]->getMeshName())
804 break;
805 }
806
807 if (viIt == meshVariables.end()) {
808 for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
809 if (meshName == viIt->dataBlocks[0]->getMeshName())
810 break;
811 }
812 }
813 // this probably means that the mesh is empty
814 if (viIt == variables.end()) {
815 return;
816 }
817
818 for (size_t idx = 0; idx < viIt->sampleDistribution.size(); idx++) {
819 if (viIt->sampleDistribution[idx] > 0) {
820 stringstream siloPath;
821 siloPath << pathPrefix << "/block";
822 int prevWidth = siloPath.width(4);
823 char prevFill = siloPath.fill('0');
824 siloPath << right << idx;
825 siloPath.width(prevWidth);
826 siloPath.fill(prevFill);
827 siloPath << "/";
828 siloPath << meshName;
829 tempstrings.push_back(siloPath.str());
830 meshnames.push_back((char*)tempstrings.back().c_str());
831 meshtypes.push_back(DB_UCDMESH);
832 }
833 }
834
835 // ignore empty mesh
836 if (meshnames.size() > 0) {
837 DBSetDir(dbfile, "/");
838 DBoptlist* optList = DBMakeOptlist(2);
839 DBAddOption(optList, DBOPT_CYCLE, &cycle);
840 DBAddOption(optList, DBOPT_DTIME, &time);
841 DBPutMultimesh(dbfile, meshName.c_str(), meshnames.size(),
842 &meshnames[0], &meshtypes[0], optList);
843 DBFreeOptlist(optList);
844 }
845 #endif
846 }
847
848 //
849 //
850 //
851 void EscriptDataset::putSiloMultiVar(DBfile* dbfile, const VarInfo& vi,
852 bool useMeshFile)
853 {
854 #if USE_SILO
855 vector<int> vartypes;
856 vector<string> tempstrings;
857 vector<char*> varnames;
858 string pathPrefix;
859 if (useMeshFile) {
860 int ppIndex = meshBlocks[0]->getSiloPath().find(':');
861 if (ppIndex != string::npos) {
862 pathPrefix = meshBlocks[0]->getSiloPath().substr(0, ppIndex+1);
863 }
864 }
865
866 for (size_t idx = 0; idx < vi.sampleDistribution.size(); idx++) {
867 if (vi.sampleDistribution[idx] > 0) {
868 stringstream siloPath;
869 siloPath << pathPrefix << "/block";
870 int prevWidth = siloPath.width(4);
871 char prevFill = siloPath.fill('0');
872 siloPath << right << idx;
873 siloPath.width(prevWidth);
874 siloPath.fill(prevFill);
875 siloPath << "/";
876 siloPath << vi.varName;
877 tempstrings.push_back(siloPath.str());
878 varnames.push_back((char*)tempstrings.back().c_str());
879 vartypes.push_back(DB_UCDVAR);
880 }
881 }
882
883 // ignore empty variables
884 if (varnames.size() > 0) {
885 DBSetDir(dbfile, "/");
886 DBoptlist* optList = DBMakeOptlist(2);
887 DBAddOption(optList, DBOPT_CYCLE, &cycle);
888 DBAddOption(optList, DBOPT_DTIME, &time);
889 if (useMeshFile) {
890 string vpath = string(MESH_VARS)+vi.varName;
891 DBPutMultivar(dbfile, vpath.c_str(), varnames.size(),
892 &varnames[0], &vartypes[0], optList);
893 } else {
894 DBPutMultivar(dbfile, vi.varName.c_str(), varnames.size(),
895 &varnames[0], &vartypes[0], optList);
896 }
897 DBFreeOptlist(optList);
898 }
899 #endif
900 }
901
//
// Writes one hidden Silo multi-var per tensor component of the given
// rank-2 variable into a "<varName>_comps/" directory. The components
// are later reassembled through the tensor definitions emitted by
// saveSilo(). Assumes vi has rank 2 (shape[0] x shape[1]) -- callers
// only route getRank()==2 variables here.
//
void EscriptDataset::putSiloMultiTensor(DBfile* dbfile, const VarInfo& vi)
{
#if USE_SILO
    string tensorDir = vi.varName+string("_comps/");
    DBSetDir(dbfile, "/");
    DBMkdir(dbfile, tensorDir.c_str());
    int one = 1;
    // HIDE_FROM_GUI keeps the raw components out of visualization menus
    DBoptlist* optList = DBMakeOptlist(3);
    DBAddOption(optList, DBOPT_CYCLE, &cycle);
    DBAddOption(optList, DBOPT_DTIME, &time);
    DBAddOption(optList, DBOPT_HIDE_FROM_GUI, &one);
    const IntVec& shape = vi.dataBlocks[0]->getShape();

    // one multi-var per component a_ij
    for (int i=0; i<shape[1]; i++) {
        for (int j=0; j<shape[0]; j++) {
            vector<string> tempstrings;
            vector<char*> varnames;
            vector<int> vartypes;
            stringstream comp;
            comp << vi.varName << "_comps/a_";
            comp << i;
            comp << j;
            // reference the component in every block that has samples
            // NOTE(review): varnames stores c_str() pointers into
            // tempstrings; push_back may reallocate the vector and,
            // with non-refcounted strings, invalidate those pointers --
            // consider reserving tempstrings up front.
            for (size_t idx = 0; idx < vi.sampleDistribution.size(); idx++) {
                if (vi.sampleDistribution[idx] > 0) {
                    // build "/blockNNNN/<comp>" with zero-padded index
                    stringstream siloPath;
                    siloPath << "/block";
                    int prevWidth = siloPath.width(4);
                    char prevFill = siloPath.fill('0');
                    siloPath << right << idx;
                    siloPath.width(prevWidth);
                    siloPath.fill(prevFill);
                    siloPath << "/" << comp.str();
                    tempstrings.push_back(siloPath.str());
                    varnames.push_back((char*)tempstrings.back().c_str());
                    vartypes.push_back(DB_UCDVAR);
                }
            }
            if (varnames.size() > 0) {
                DBPutMultivar(dbfile, comp.str().c_str(), varnames.size(),
                        &varnames[0], &vartypes[0], optList);
            }
        }
    }
    DBFreeOptlist(optList);
#endif
}
951
952 } // namespace escriptexport
953

  ViewVC Help
Powered by ViewVC 1.1.26