/[escript]/trunk/weipa/src/EscriptDataset.cpp
ViewVC logotype

Contents of /trunk/weipa/src/EscriptDataset.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3792 - (show annotations)
Wed Feb 1 06:16:25 2012 UTC (6 years, 11 months ago) by caltinay
File size: 35037 byte(s)
Merged ripley rectangular domain into trunk.

1
2 /*******************************************************
3 *
4 * Copyright (c) 2003-2010 by University of Queensland
5 * Earth Systems Science Computational Center (ESSCC)
6 * http://www.uq.edu.au/esscc
7 *
8 * Primary Business: Queensland, Australia
9 * Licensed under the Open Software License version 3.0
10 * http://www.opensource.org/licenses/osl-3.0.php
11 *
12 *******************************************************/
13
14 #include <weipa/EscriptDataset.h>
15 #include <weipa/DataVar.h>
16 #include <weipa/ElementData.h>
17 #include <weipa/FileWriter.h>
18 #include <weipa/FinleyDomain.h>
19 #include <weipa/NodeData.h>
20 #include <weipa/RipleyDomain.h>
21
22 #ifndef VISIT_PLUGIN
23 #include <escript/Data.h>
24 #include <dudley/CppAdapter/MeshAdapter.h>
25 #include <finley/CppAdapter/MeshAdapter.h>
26 #include <ripley/RipleyDomain.h>
27 #endif
28
29 #include <cstring>
30 #include <iostream>
31 #include <numeric> // for std::accumulate
32 #include <sstream> // for std::ostringstream
33
34 #if USE_SILO
35 #include <silo.h>
36
37 #if HAVE_MPI
38 #include <pmpio.h>
39 #endif
40
41 #endif // USE_SILO
42
43 using namespace std;
44
45 namespace weipa {
46
47 const char* MESH_VARS = "mesh_vars/";
48 const int NUM_SILO_FILES = 1;
49
//
// Default constructor - sets up an empty serial dataset (rank 0, size 1)
//
EscriptDataset::EscriptDataset() :
    cycle(0),
    time(0.),
    externalDomain(false),
    wantsMeshVars(false),
    mpiRank(0),
    mpiSize(1)
{
    // NOTE(review): in MPI builds mpiComm is not initialized here; it is
    // presumably set later via setDomain() - confirm no communication
    // happens before then.
}
62
//
// Constructor with communicator - queries rank and size from the given
// MPI communicator (only available in MPI builds)
//
#if HAVE_MPI
EscriptDataset::EscriptDataset(MPI_Comm comm) :
    cycle(0),
    time(0.),
    externalDomain(false),
    wantsMeshVars(false),
    mpiComm(comm)
{
    // determine this process' rank and the total number of processes
    MPI_Comm_rank(mpiComm, &mpiRank);
    MPI_Comm_size(mpiComm, &mpiSize);
}
#endif
78
//
// Destructor - no explicit cleanup required
//
EscriptDataset::~EscriptDataset()
{
}
85
//
// Sets the domain using an escript domain instance.
// Returns true if the domain was converted successfully on all ranks.
//
bool EscriptDataset::setDomain(const escript::AbstractDomain* domain)
{
#ifndef VISIT_PLUGIN
    // error levels: 0 = success, 1 = usage error (state unchanged),
    // 2 = conversion failure (partially converted chunks cleared below)
    int myError = 0, gError;

    // fail if the domain has already been set
    if (domainChunks.size() > 0) {
        cerr << "Domain has already been set!" << endl;
        myError = 1;
    } else if (!domain) {
        cerr << "Domain is NULL!" << endl;
        myError = 1;
    } else {
#if HAVE_MPI
        // adopt the MPI layout of the supplied domain
        mpiComm = domain->getMPIComm();
        mpiRank = domain->getMPIRank();
        mpiSize = domain->getMPISize();
#endif
        // FinleyDomain can handle both finley and dudley
        if (dynamic_cast<const finley::MeshAdapter*>(domain)
                || dynamic_cast<const dudley::MeshAdapter*>(domain)) {
            DomainChunk_ptr dom(new FinleyDomain());
            if (dom->initFromEscript(domain)) {
                if (mpiSize > 1)
                    dom->reorderGhostZones(mpiRank);
                domainChunks.push_back(dom);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        } else if (dynamic_cast<const ripley::RipleyDomain*>(domain)) {
            DomainChunk_ptr dom(new RipleyDomain());
            if (dom->initFromEscript(domain)) {
                if (mpiSize > 1)
                    dom->reorderGhostZones(mpiRank);
                domainChunks.push_back(dom);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        } else {
            cerr << "Unsupported domain type!" << endl;
            myError = 2;
        }
    }

    // agree on a global error level across all ranks
    if (mpiSize > 1) {
#if HAVE_MPI
        MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
#else
        gError = myError;
#endif
    } else {
        gError = myError;
    }

    if (gError>1) {
        domainChunks.clear();
    } else if (gError==0) {
        // Convert mesh data to variables
        convertMeshVariables();
    }
    return (gError==0);

#else // VISIT_PLUGIN
    return false;
#endif
}
157
158 //
159 //
160 //
161 bool EscriptDataset::addData(escript::Data& data, const string name,
162 const string units)
163 {
164 #ifndef VISIT_PLUGIN
165 bool success = true;
166
167 // fail if no domain has been set
168 if (domainChunks.size() == 0) {
169 success = false;
170 } else {
171 // initialize variable
172 VarInfo vi;
173 vi.varName = name;
174 vi.units = units;
175
176 DataVar_ptr var(new DataVar(vi.varName));
177 if (var->initFromEscript(data, domainChunks[0])) {
178 vi.dataChunks.push_back(var);
179 updateSampleDistribution(vi);
180 vi.valid = true;
181 } else {
182 var.reset();
183 vi.valid = false;
184 }
185 variables.push_back(vi);
186 }
187 return success;
188
189 #else // VISIT_PLUGIN
190 return false;
191 #endif
192 }
193
194 //
195 //
196 //
197 bool EscriptDataset::loadNetCDF(const string domainFilePattern,
198 const StringVec& varFiles,
199 const StringVec& varNames, int nChunks)
200 {
201 // sanity check
202 if (varFiles.size() != varNames.size()) {
203 return false;
204 }
205
206 // load the domain files
207 if (!loadDomain(domainFilePattern, nChunks)) {
208 return false;
209 }
210
211 // load the variables
212 StringVec::const_iterator fileIt = varFiles.begin();
213 StringVec::const_iterator nameIt = varNames.begin();
214 for (; fileIt != varFiles.end(); fileIt++, nameIt++) {
215 loadData(*fileIt, *nameIt, "");
216 }
217
218 return true;
219 }
220
221 //
222 // Load only variables using provided domain
223 //
224 bool EscriptDataset::loadNetCDF(const DomainChunks& domain,
225 const StringVec& varFiles,
226 const StringVec& varNames)
227 {
228 // sanity check
229 if (varFiles.size() != varNames.size()) {
230 return false;
231 }
232
233 // set the domain
234 if (!setExternalDomain(domain)) {
235 return false;
236 }
237
238 // load the variables
239 StringVec::const_iterator fileIt = varFiles.begin();
240 StringVec::const_iterator nameIt = varNames.begin();
241 for (; fileIt != varFiles.end(); fileIt++, nameIt++) {
242 loadData(*fileIt, *nameIt, "");
243 }
244
245 return true;
246 }
247
//
// Writes the dataset to a Silo file. With MPI the ranks share the file(s)
// and serialize access through PMPIO baton passing. Returns true on
// success, false if Silo support is unavailable or the file could not be
// created.
//
bool EscriptDataset::saveSilo(string fileName, bool useMultiMesh)
{
#if USE_SILO
    // nothing to do without a domain
    if (domainChunks.size() == 0)
        return false;

    const char* blockDirFmt = "/block%04d";
    string siloPath;
    DBfile* dbfile = NULL;
    int driver = DB_HDF5; // prefer HDF5 if available
    //FIXME: Silo's HDF5 driver and NetCDF 4 are currently incompatible because
    //NetCDF calls H5close() when all its files are closed.
    //Unidata has been contacted, Ticket ID: YTC-894489.
    //When this issue is resolved, remove the following line.
    driver = DB_PDB;
#if HAVE_MPI
    PMPIO_baton_t* baton = NULL;
#endif

    // ensure the file name carries a .silo extension
    if (fileName.length() < 6 || fileName.compare(fileName.length()-5, 5, ".silo") != 0) {
        fileName+=".silo";
    }

    if (mpiSize > 1) {
#if HAVE_MPI
        // serialize file access across ranks (0x1337 is an arbitrary MPI tag)
        baton = PMPIO_Init(NUM_SILO_FILES, PMPIO_WRITE,
                    mpiComm, 0x1337, PMPIO_DefaultCreate, PMPIO_DefaultOpen,
                    PMPIO_DefaultClose, (void*)&driver);
        // try the fallback driver in case of error
        if (!baton && driver != DB_PDB) {
            driver = DB_PDB;
            baton = PMPIO_Init(NUM_SILO_FILES, PMPIO_WRITE,
                        mpiComm, 0x1337, PMPIO_DefaultCreate, PMPIO_DefaultOpen,
                        PMPIO_DefaultClose, (void*)&driver);
        }
        if (baton) {
            // each rank writes into its own /blockNNNN directory
            char str[64];
            snprintf(str, 64, blockDirFmt, PMPIO_RankInGroup(baton, mpiRank));
            siloPath = str;
            dbfile = (DBfile*) PMPIO_WaitForBaton(
                    baton, fileName.c_str(), siloPath.c_str());
        }
#endif
    } else {
        dbfile = DBCreate(fileName.c_str(), DB_CLOBBER, DB_LOCAL,
                "escriptData", driver);
        // try the fallback driver in case of error
        if (!dbfile && driver != DB_PDB) {
            driver = DB_PDB;
            dbfile = DBCreate(fileName.c_str(), DB_CLOBBER, DB_LOCAL,
                    "escriptData", driver);
        }
    }

    if (!dbfile) {
        cerr << "Could not create Silo file." << endl;
        if (mpiSize > 1) {
#if HAVE_MPI
            // still pass the baton on so other ranks don't deadlock
            PMPIO_HandOffBaton(baton, dbfile);
            PMPIO_Finish(baton);
#endif
        }
        return false;
    }

    if (driver==DB_HDF5) {
        // gzip level 1 already provides good compression with minimal
        // performance penalty. Some tests showed that gzip levels >3 performed
        // rather badly on escript data both in terms of time and space
        DBSetCompression("ERRMODE=FALLBACK METHOD=GZIP LEVEL=1");
    }

    DomainChunks::iterator domIt;
    VarVector::iterator viIt;
    int idx = 0;
    for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, idx++) {
        // in the serial case all chunks live in one file, so create a
        // per-chunk directory (with MPI the PMPIO path above covers this)
        if (mpiSize == 1) {
            char str[64];
            snprintf(str, 64, blockDirFmt, idx);
            siloPath = str;
            DBMkdir(dbfile, siloPath.c_str());
        }
        // write block of the mesh if we don't use an external domain
        if (!externalDomain) {
            if (! (*domIt)->writeToSilo(
                        dbfile, siloPath, meshLabels, meshUnits, wantsMeshVars)) {
                cerr << "Error writing block " << idx
                    << " of mesh to Silo file!" << endl;
                break;
            }
        }

        // write variables for current domain chunk
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            // do not attempt to write this variable if previous steps failed
            if (!viIt->valid) continue;
            DataVar_ptr var = viIt->dataChunks[idx];
            if (!var->writeToSilo(dbfile, siloPath, viIt->units)) {
                cerr << "Error writing block " << idx << " of '"
                    << var->getName() << "' to Silo file!" << endl;
                viIt->valid = false;
            }
        }
    }

    // rank 0 writes additional data that describe how the parts fit together
    if (mpiRank == 0) {
        if (useMultiMesh) {
            const StringVec& meshNames = domainChunks[0]->getMeshNames();
            StringVec::const_iterator it;
            for (it = meshNames.begin(); it != meshNames.end(); it++)
                putSiloMultiMesh(dbfile, *it);

            if (wantsMeshVars) {
                DBMkdir(dbfile, MESH_VARS);
                for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++)
                    putSiloMultiVar(dbfile, *viIt, true);
            }

            // rank-2 variables need per-component multivars, others get a
            // plain multivar
            for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
                if (!viIt->valid) continue;
                DataVar_ptr var = viIt->dataChunks[0];
                if (var->getRank() < 2)
                    putSiloMultiVar(dbfile, *viIt);
                else
                    putSiloMultiTensor(dbfile, *viIt);
            }
        }

        vector<char*> tensorNames;
        vector<string> tensorDefStrings;
        vector<char*> tensorDefs;

        // collect tensors for their Silo definitions
        // NOTE(review): tensorDefs stores c_str() pointers into
        // tensorDefStrings while that vector is still growing - a
        // reallocation would invalidate them. tensorNames also keeps the
        // pointer from getName().c_str(); verify getName() returns a
        // reference, not a temporary.
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            if (!viIt->valid) continue;
            DataVar_ptr var = viIt->dataChunks[0];
            if (var->getRank() == 2) {
                tensorDefStrings.push_back(var->getTensorDef());
                tensorDefs.push_back((char*)tensorDefStrings.back().c_str());
                tensorNames.push_back((char*)var->getName().c_str());
            }
        }

        if (tensorDefs.size()) {
            DBSetDir(dbfile, "/");
            DBoptlist* optList = DBMakeOptlist(2);
            DBAddOption(optList, DBOPT_CYCLE, &cycle);
            DBAddOption(optList, DBOPT_DTIME, &time);
            vector<DBoptlist*> defOpts(tensorDefs.size(), optList);
            vector<int> defTypes(tensorDefs.size(), DB_VARTYPE_TENSOR);
            DBPutDefvars(dbfile, "tensors", tensorDefs.size(), &tensorNames[0],
                    &defTypes[0], &tensorDefs[0], &defOpts[0]);
            DBFreeOptlist(optList);
        }
    }

    if (mpiSize > 1) {
#if HAVE_MPI
        PMPIO_HandOffBaton(baton, dbfile);
        PMPIO_Finish(baton);
#endif
    } else {
        DBClose(dbfile);
    }

    return true;

#else // !USE_SILO
    return false;
#endif
}
423
424 //
425 //
426 //
427 bool EscriptDataset::saveVTK(string fileName)
428 {
429 if (domainChunks.size() == 0)
430 return false;
431
432 map<string,VarVector> varsPerMesh;
433
434 // determine meshes and variables to write
435 VarVector::iterator viIt;
436 for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
437 // skip empty variable
438 int numSamples = accumulate(viIt->sampleDistribution.begin(),
439 viIt->sampleDistribution.end(), 0);
440 if (numSamples == 0 || !viIt->valid) {
441 continue;
442 }
443 string meshName = viIt->dataChunks[0]->getMeshName();
444
445 // ranks with 0 samples can't know the correct mesh name.
446 // We assume rank 0 always has samples, if this turns out to be a
447 // wrong assumption then a bit more work is needed to get the correct
448 // mesh name to all ranks.
449 #if HAVE_MPI
450 if (mpiSize > 1) {
451 char name[100];
452 if (mpiRank == 0) {
453 strncpy(&name[0], meshName.c_str(), 100);
454 }
455
456 MPI_Bcast(name, 100, MPI_CHAR, 0, mpiComm);
457 meshName = name;
458 }
459 #endif
460
461 map<string,VarVector>::iterator it = varsPerMesh.find(meshName);
462 if (it != varsPerMesh.end()) {
463 it->second.push_back(*viIt);
464 } else {
465 VarVector v;
466 v.push_back(*viIt);
467 varsPerMesh[meshName] = v;
468 }
469 }
470
471 if (fileName.length() < 5 || fileName.compare(fileName.length()-4, 4, ".vtu") != 0) {
472 fileName+=".vtu";
473 }
474
475 bool ret = true;
476 if (varsPerMesh.empty()) {
477 // no valid variables so just write default mesh
478 ret = saveVTKsingle(fileName, "Elements", VarVector());
479 } else {
480 // write one file per required mesh
481 string newName(fileName);
482 string filePrefix(fileName.substr(0, fileName.length()-4));
483 bool prependMeshName=(varsPerMesh.size()>1);
484 map<string,VarVector>::const_iterator vpmIt;
485 for (vpmIt=varsPerMesh.begin(); vpmIt!=varsPerMesh.end(); vpmIt++) {
486 if (prependMeshName) {
487 newName=filePrefix+"_"+vpmIt->first+".vtu";
488 }
489 // attempt to write all files even if one fails
490 if (!saveVTKsingle(newName, vpmIt->first, vpmIt->second)) {
491 ret = false;
492 }
493 }
494 }
495 return ret;
496 }
497
498 //
499 //
500 //
501 void EscriptDataset::setMeshLabels(const string x, const string y, const string z)
502 {
503 meshLabels.clear();
504 meshLabels.push_back(x);
505 meshLabels.push_back(y);
506 if (z.length()>0)
507 meshLabels.push_back(z);
508 }
509
510 //
511 //
512 //
513 void EscriptDataset::setMeshUnits(const string x, const string y, const string z)
514 {
515 meshUnits.clear();
516 meshUnits.push_back(x);
517 meshUnits.push_back(y);
518 if (z.length()>0)
519 meshUnits.push_back(z);
520 }
521
522 //
523 //
524 //
525 bool EscriptDataset::saveVTKsingle(const string& fileName,
526 const string& meshName,
527 const VarVector& vars)
528 {
529 VarVector nodalVars, cellVars;
530 VarVector::const_iterator viIt;
531 for (viIt = vars.begin(); viIt != vars.end(); viIt++) {
532 const DataChunks& varChunks = viIt->dataChunks;
533 if (varChunks[0]->isNodeCentered()) {
534 nodalVars.push_back(*viIt);
535 } else {
536 cellVars.push_back(*viIt);
537 }
538 }
539
540 if (wantsMeshVars) {
541 // add mesh variables if requested
542 for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++) {
543 DataVar_ptr var = viIt->dataChunks[0];
544 if (meshName == var->getMeshName()) {
545 VarInfo vi = *viIt;
546 vi.varName = string(MESH_VARS)+vi.varName;
547 if (var->isNodeCentered()) {
548 nodalVars.push_back(vi);
549 } else {
550 cellVars.push_back(vi);
551 }
552 }
553 }
554 }
555
556 DomainChunks::iterator domIt;
557 int gNumPoints;
558 int gNumCells = 0;
559 int gCellSizeAndType[2] = { 0, 0 };
560
561 FileWriter* fw = NULL;
562
563 if (mpiSize > 1) {
564 #if HAVE_MPI
565 fw = new FileWriter(mpiComm);
566 domainChunks[0]->removeGhostZones(mpiRank);
567 ElementData_ptr elements = domainChunks[0]->getElementsByName(meshName);
568 int myNumCells = 0;
569 if (elements) {
570 myNumCells = elements->getNumElements();
571 }
572 MPI_Reduce(&myNumCells, &gNumCells, 1, MPI_INT, MPI_SUM, 0, mpiComm);
573
574 int myCellSizeAndType[2] = { 0, 0 };
575 if (elements) {
576 myCellSizeAndType[0] = elements->getNodesPerElement();
577 myCellSizeAndType[1] = elements->getType();
578 }
579
580 // rank 0 needs to know element type and size but it's possible that
581 // this information is only available on other ranks (value=0) so
582 // retrieve it
583 MPI_Reduce(&myCellSizeAndType, &gCellSizeAndType, 2, MPI_INT, MPI_MAX,
584 0, mpiComm);
585 #endif
586 } else {
587 fw = new FileWriter();
588 int idx = 0;
589 for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, idx++) {
590 if (domainChunks.size() > 1)
591 (*domIt)->removeGhostZones(idx);
592 ElementData_ptr elements = (*domIt)->getElementsByName(meshName);
593 gNumCells += elements->getNumElements();
594 if (gCellSizeAndType[0] == 0)
595 gCellSizeAndType[0] = elements->getNodesPerElement();
596 if (gCellSizeAndType[1] == 0)
597 gCellSizeAndType[1] = elements->getType();
598 }
599 }
600
601 gNumPoints = domainChunks[0]->getNodes()->getGlobalNumNodes();
602
603 ostringstream oss;
604 oss.setf(ios_base::scientific, ios_base::floatfield);
605
606 if (!fw->openFile(fileName)) {
607 return false;
608 }
609
610 if (mpiRank == 0) {
611 #ifdef _DEBUG
612 cout << meshName << ": pts=" << gNumPoints << ", cells=" << gNumCells
613 << ", cellsize=" << gCellSizeAndType[0] << ", type="
614 << gCellSizeAndType[1] << ", numNodalVars=" << nodalVars.size()
615 << ", numCellVars=" << cellVars.size()
616 << endl;
617 #endif
618
619 oss << "<?xml version=\"1.0\"?>" << endl;
620 oss << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\"";
621 if (mdSchema.length()>0) {
622 oss << " " << mdSchema;
623 }
624 oss << ">" << endl;
625 if (mdString.length()>0) {
626 oss << "<MetaData>" << endl << mdString << endl
627 << "</MetaData>" << endl;
628 }
629 oss << "<UnstructuredGrid>" << endl;
630
631 // write time and cycle values
632 oss << "<FieldData>" << endl;
633 oss << "<DataArray Name=\"TIME\" type=\"Float64\" format=\"ascii\" NumberOfTuples=\"1\">" << endl;
634 oss << time << endl;
635 oss << "</DataArray>" << endl << "</FieldData>" << endl;
636 oss << "<FieldData>" << endl;
637 oss << "<DataArray Name=\"CYCLE\" type=\"Int32\" format=\"ascii\" NumberOfTuples=\"1\">" << endl;
638 oss << cycle << endl;
639 oss << "</DataArray>" << endl << "</FieldData>" << endl;
640
641 // write mesh header
642 oss << "<Piece NumberOfPoints=\"" << gNumPoints
643 << "\" NumberOfCells=\"" << gNumCells << "\">" << endl;
644 oss << "<Points>" << endl;
645 oss << "<DataArray NumberOfComponents=\"3\" type=\"Float64\" format=\"ascii\">" << endl;
646 }
647
648 //
649 // coordinates (note that we are using the original nodes)
650 //
651 int blockNum = (mpiSize>1 ? mpiRank : 0);
652 for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, blockNum++) {
653 (*domIt)->getNodes()->writeCoordinatesVTK(oss, blockNum);
654 }
655
656 if (!fw->writeOrdered(oss)) {
657 cerr << "Warning, ignoring file write error!" << endl;
658 }
659
660 if (mpiRank == 0) {
661 oss << "</DataArray>" << endl << "</Points>" << endl;
662 oss << "<Cells>" << endl;
663 oss << "<DataArray Name=\"connectivity\" type=\"Int32\" format=\"ascii\">" << endl;
664 }
665
666 //
667 // connectivity
668 //
669 for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++) {
670 ElementData_ptr el = (*domIt)->getElementsByName(meshName);
671 if (el) {
672 el->writeConnectivityVTK(oss);
673 }
674 }
675
676 if (!fw->writeOrdered(oss)) {
677 cerr << "Warning, ignoring file write error!" << endl;
678 }
679
680 //
681 // offsets & types
682 //
683 if (mpiRank == 0) {
684 oss << "</DataArray>" << endl;
685 oss << "<DataArray Name=\"offsets\" type=\"Int32\" format=\"ascii\">" << endl;
686 for (int i=1; i < gNumCells+1; i++) {
687 oss << i*gCellSizeAndType[0] << endl;
688 }
689 oss << "</DataArray>" << endl;
690 oss << "<DataArray Name=\"types\" type=\"UInt8\" format=\"ascii\">" << endl;
691 for (int i=1; i < gNumCells+1; i++) {
692 oss << gCellSizeAndType[1] << endl;
693 }
694 oss << "</DataArray>" << endl << "</Cells>" << endl;
695 if (!fw->writeShared(oss)) {
696 cerr << "Warning, ignoring file write error!" << endl;
697 }
698 }
699
700 // now write all variables - first the nodal data, then cell data
701
702 // write nodal data if any
703 if (!nodalVars.empty()) {
704 if (mpiRank == 0)
705 oss << "<PointData>" << endl;
706 for (viIt = nodalVars.begin(); viIt != nodalVars.end(); viIt++) {
707 writeVarToVTK(*viIt, oss);
708 if (!fw->writeOrdered(oss)) {
709 cerr << "Warning, ignoring file write error!" << endl;
710 }
711 if (mpiRank == 0)
712 oss << "</DataArray>" << endl;
713 }
714 if (mpiRank == 0)
715 oss << "</PointData>" << endl;
716 }
717
718 // write cell data if any
719 if (!cellVars.empty()) {
720 if (mpiRank == 0)
721 oss << "<CellData>" << endl;
722 for (viIt = cellVars.begin(); viIt != cellVars.end(); viIt++) {
723 writeVarToVTK(*viIt, oss);
724 if (!fw->writeOrdered(oss)) {
725 cerr << "Warning, ignoring file write error!" << endl;
726 }
727 if (mpiRank == 0)
728 oss << "</DataArray>" << endl;
729 }
730 if (mpiRank == 0)
731 oss << "</CellData>" << endl;
732 }
733
734 if (mpiRank == 0) {
735 oss << "</Piece>" << endl << "</UnstructuredGrid>" << endl
736 << "</VTKFile>" << endl;
737 if (!fw->writeShared(oss)) {
738 cerr << "Warning, ignoring file write error!" << endl;
739 }
740 }
741
742 fw->close();
743 delete fw;
744 return true;
745 }
746
747 //
748 //
749 //
750 void EscriptDataset::writeVarToVTK(const VarInfo& varInfo, ostream& os)
751 {
752 const DataChunks& varChunks = varInfo.dataChunks;
753 int rank = varChunks[0]->getRank();
754 int numComps = 1;
755 if (rank > 0)
756 numComps *= 3;
757 if (rank > 1)
758 numComps *= 3;
759
760 if (mpiRank == 0) {
761 os << "<DataArray Name=\"" << varInfo.varName
762 << "\" type=\"Float64\" NumberOfComponents=\"" << numComps
763 << "\" format=\"ascii\">" << endl;
764 }
765
766 // this is required in case we read a dataset with more than one chunk on
767 // one rank
768 int blockNum = (mpiSize>1 ? mpiRank : 0);
769 DataChunks::const_iterator blockIt;
770 for (blockIt = varChunks.begin(); blockIt != varChunks.end(); blockIt++, blockNum++) {
771 (*blockIt)->writeToVTK(os, blockNum);
772 }
773 }
774
775 //
776 // Sets the domain from dump files.
777 //
778 bool EscriptDataset::loadDomain(const string filePattern, int nChunks)
779 {
780 int myError = 0, gError;
781
782 if (mpiSize > 1 && nChunks != mpiSize) {
783 cerr << "Cannot load " << nChunks << " chunks on " << mpiSize
784 << " MPI ranks!" << endl;
785 myError = 1;
786
787 } else if (domainChunks.size() > 0) {
788 cerr << "Domain has already been set!" << endl;
789 myError = 1;
790
791 } else {
792 char* str = new char[filePattern.length()+10];
793 // FIXME: This assumes a finley domain!
794 if (mpiSize > 1) {
795 DomainChunk_ptr chunk(new FinleyDomain());
796 sprintf(str, filePattern.c_str(), mpiRank);
797 string domainFile = str;
798 if (chunk->initFromFile(domainFile)) {
799 chunk->reorderGhostZones(mpiRank);
800 domainChunks.push_back(chunk);
801 } else {
802 cerr << "Error initializing domain!" << endl;
803 myError = 2;
804 }
805 } else {
806 for (int idx=0; idx < nChunks; idx++) {
807 DomainChunk_ptr chunk(new FinleyDomain());
808 sprintf(str, filePattern.c_str(), idx);
809 string domainFile = str;
810 if (chunk->initFromFile(domainFile)) {
811 if (nChunks > 1)
812 chunk->reorderGhostZones(idx);
813 domainChunks.push_back(chunk);
814 } else {
815 cerr << "Error initializing domain block " << idx << endl;
816 myError = 2;
817 break;
818 }
819 }
820 }
821 delete[] str;
822 }
823
824 if (mpiSize > 1) {
825 #if HAVE_MPI
826 MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
827 #else
828 gError = myError;
829 #endif
830 } else {
831 gError = myError;
832 }
833
834 if (gError>1) {
835 domainChunks.clear();
836 } else if (gError==0) {
837 // Convert mesh data to variables
838 convertMeshVariables();
839 }
840 return (gError==0);
841 }
842
843 //
844 // Sets an already converted domain.
845 //
846 bool EscriptDataset::setExternalDomain(const DomainChunks& domain)
847 {
848 int myError = 0, gError;
849
850 if (mpiSize > 1 && domain.size() > 1) {
851 cerr << "Can only add one domain block per rank when using MPI!"
852 << endl;
853 myError = 1;
854
855 } else if (domainChunks.size() > 0) {
856 cerr << "Domain has already been set!" << endl;
857 myError = 1;
858
859 }
860
861 if (mpiSize > 1) {
862 #if HAVE_MPI
863 MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
864 #else
865 gError = myError;
866 #endif
867 } else {
868 gError = myError;
869 }
870
871 if (!gError) {
872 externalDomain = true;
873 domainChunks = domain;
874 }
875
876 return !gError;
877 }
878
879 //
880 //
881 //
882 bool EscriptDataset::loadData(const string filePattern, const string name,
883 const string units)
884 {
885 int myError = 0, gError;
886
887 // fail if no domain has been set
888 if (domainChunks.size() == 0) {
889 gError = 1;
890
891 } else {
892 // initialize variable
893 VarInfo vi;
894 vi.varName = name;
895 vi.units = units;
896 vi.valid = true;
897 char* str = new char[filePattern.length()+10];
898
899 // read all parts of the variable
900 DomainChunks::iterator domIt;
901 int idx = (mpiSize > 1) ? mpiRank : 0;
902 for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, idx++) {
903 sprintf(str, filePattern.c_str(), idx);
904 string dfile = str;
905 DataVar_ptr var(new DataVar(name));
906 if (var->initFromFile(dfile, *domIt))
907 vi.dataChunks.push_back(var);
908 else {
909 cerr << "Error reading " << dfile << endl;
910 myError = 1;
911 break;
912 }
913 }
914 delete[] str;
915
916 if (mpiSize > 1) {
917 #if HAVE_MPI
918 MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
919 #else
920 gError = myError;
921 #endif
922 } else {
923 gError = myError;
924 }
925
926 if (!gError) {
927 // only add variable if all chunks have been read without error
928 updateSampleDistribution(vi);
929 variables.push_back(vi);
930 }
931 }
932
933 return !gError;
934 }
935
936 //
937 //
938 //
939 void EscriptDataset::convertMeshVariables()
940 {
941 const StringVec& varNames = domainChunks[0]->getVarNames();
942 StringVec::const_iterator it;
943 for (it = varNames.begin(); it != varNames.end(); it++) {
944 VarInfo vi;
945 vi.varName = *it;
946 vi.valid = true;
947 // get all parts of current variable
948 DomainChunks::iterator domIt;
949 for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++) {
950 DataVar_ptr var = (*domIt)->getDataVarByName(*it);
951 if (var != NULL) {
952 vi.dataChunks.push_back(var);
953 } else {
954 cerr << "Error converting mesh variable " << *it << endl;
955 vi.valid = false;
956 break;
957 }
958 }
959 updateSampleDistribution(vi);
960 meshVariables.push_back(vi);
961 }
962 }
963
964 // retrieves the number of samples at each block - used to determine which
965 // blocks contribute to given variable if any.
966 void EscriptDataset::updateSampleDistribution(VarInfo& vi)
967 {
968 IntVec sampleDist;
969 const DataChunks& varChunks = vi.dataChunks;
970
971 if (mpiSize > 1) {
972 #if HAVE_MPI
973 int myNumSamples = varChunks[0]->getNumberOfSamples();
974 sampleDist.insert(sampleDist.end(), mpiSize, 0);
975 MPI_Allgather(
976 &myNumSamples, 1, MPI_INT, &sampleDist[0], 1, MPI_INT, mpiComm);
977 #endif
978 } else {
979 DataChunks::const_iterator it;
980 for (it = varChunks.begin(); it != varChunks.end(); it++) {
981 sampleDist.push_back((*it)->getNumberOfSamples());
982 }
983 }
984 vi.sampleDistribution = sampleDist;
985 }
986
987 //
988 //
989 //
990 void EscriptDataset::putSiloMultiMesh(DBfile* dbfile, const string& meshName)
991 {
992 #if USE_SILO
993 vector<int> meshtypes;
994 vector<string> tempstrings;
995 vector<char*> meshnames;
996 string pathPrefix;
997
998 int ppIndex = domainChunks[0]->getSiloPath().find(':');
999 if (ppIndex != string::npos) {
1000 pathPrefix = domainChunks[0]->getSiloPath().substr(0, ppIndex+1);
1001 }
1002
1003 // find a variable that is defined on this mesh to get the sample
1004 // distribution (which tells us which ranks contribute to this mesh).
1005 // Try mesh variables first, then regular ones.
1006 VarVector::const_iterator viIt;
1007 for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++) {
1008 if (meshName == viIt->dataChunks[0]->getMeshName())
1009 break;
1010 }
1011
1012 if (viIt == meshVariables.end()) {
1013 for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
1014 if (meshName == viIt->dataChunks[0]->getMeshName())
1015 break;
1016 }
1017 }
1018 // this probably means that the mesh is empty
1019 if (viIt == variables.end()) {
1020 return;
1021 }
1022
1023 for (size_t idx = 0; idx < viIt->sampleDistribution.size(); idx++) {
1024 if (viIt->sampleDistribution[idx] > 0) {
1025 stringstream siloPath;
1026 siloPath << pathPrefix << "/block";
1027 int prevWidth = siloPath.width(4);
1028 char prevFill = siloPath.fill('0');
1029 siloPath << right << idx;
1030 siloPath.width(prevWidth);
1031 siloPath.fill(prevFill);
1032 siloPath << "/";
1033 siloPath << meshName;
1034 tempstrings.push_back(siloPath.str());
1035 meshnames.push_back((char*)tempstrings.back().c_str());
1036 meshtypes.push_back(DB_UCDMESH);
1037 }
1038 }
1039
1040 // ignore empty mesh
1041 if (!meshnames.empty()) {
1042 DBSetDir(dbfile, "/");
1043 DBoptlist* optList = DBMakeOptlist(2);
1044 DBAddOption(optList, DBOPT_CYCLE, &cycle);
1045 DBAddOption(optList, DBOPT_DTIME, &time);
1046 DBPutMultimesh(dbfile, meshName.c_str(), meshnames.size(),
1047 &meshnames[0], &meshtypes[0], optList);
1048 DBFreeOptlist(optList);
1049 }
1050 #endif
1051 }
1052
1053 //
1054 //
1055 //
1056 void EscriptDataset::putSiloMultiVar(DBfile* dbfile, const VarInfo& vi,
1057 bool useMeshFile)
1058 {
1059 #if USE_SILO
1060 vector<int> vartypes;
1061 vector<string> tempstrings;
1062 vector<char*> varnames;
1063 string pathPrefix;
1064 if (useMeshFile) {
1065 int ppIndex = domainChunks[0]->getSiloPath().find(':');
1066 if (ppIndex != string::npos) {
1067 pathPrefix = domainChunks[0]->getSiloPath().substr(0, ppIndex+1);
1068 }
1069 }
1070
1071 for (size_t idx = 0; idx < vi.sampleDistribution.size(); idx++) {
1072 if (vi.sampleDistribution[idx] > 0) {
1073 stringstream siloPath;
1074 siloPath << pathPrefix << "/block";
1075 int prevWidth = siloPath.width(4);
1076 char prevFill = siloPath.fill('0');
1077 siloPath << right << idx;
1078 siloPath.width(prevWidth);
1079 siloPath.fill(prevFill);
1080 siloPath << "/";
1081 siloPath << vi.varName;
1082 tempstrings.push_back(siloPath.str());
1083 varnames.push_back((char*)tempstrings.back().c_str());
1084 vartypes.push_back(DB_UCDVAR);
1085 }
1086 }
1087
1088 // ignore empty variables
1089 if (!varnames.empty()) {
1090 DBSetDir(dbfile, "/");
1091 DBoptlist* optList = DBMakeOptlist(2);
1092 DBAddOption(optList, DBOPT_CYCLE, &cycle);
1093 DBAddOption(optList, DBOPT_DTIME, &time);
1094 if (useMeshFile) {
1095 string vpath = string(MESH_VARS)+vi.varName;
1096 DBPutMultivar(dbfile, vpath.c_str(), varnames.size(),
1097 &varnames[0], &vartypes[0], optList);
1098 } else {
1099 DBPutMultivar(dbfile, vi.varName.c_str(), varnames.size(),
1100 &varnames[0], &vartypes[0], optList);
1101 }
1102 DBFreeOptlist(optList);
1103 }
1104 #endif
1105 }
1106
1107 //
1108 //
1109 //
1110 void EscriptDataset::putSiloMultiTensor(DBfile* dbfile, const VarInfo& vi)
1111 {
1112 #if USE_SILO
1113 string tensorDir = vi.varName+string("_comps/");
1114 DBSetDir(dbfile, "/");
1115 DBMkdir(dbfile, tensorDir.c_str());
1116 int one = 1;
1117 DBoptlist* optList = DBMakeOptlist(3);
1118 DBAddOption(optList, DBOPT_CYCLE, &cycle);
1119 DBAddOption(optList, DBOPT_DTIME, &time);
1120 DBAddOption(optList, DBOPT_HIDE_FROM_GUI, &one);
1121 const IntVec& shape = vi.dataChunks[0]->getShape();
1122
1123 for (int i=0; i<shape[1]; i++) {
1124 for (int j=0; j<shape[0]; j++) {
1125 vector<string> tempstrings;
1126 vector<char*> varnames;
1127 vector<int> vartypes;
1128 stringstream comp;
1129 comp << vi.varName << "_comps/a_";
1130 comp << i;
1131 comp << j;
1132 for (size_t idx = 0; idx < vi.sampleDistribution.size(); idx++) {
1133 if (vi.sampleDistribution[idx] > 0) {
1134 stringstream siloPath;
1135 siloPath << "/block";
1136 int prevWidth = siloPath.width(4);
1137 char prevFill = siloPath.fill('0');
1138 siloPath << right << idx;
1139 siloPath.width(prevWidth);
1140 siloPath.fill(prevFill);
1141 siloPath << "/" << comp.str();
1142 tempstrings.push_back(siloPath.str());
1143 varnames.push_back((char*)tempstrings.back().c_str());
1144 vartypes.push_back(DB_UCDVAR);
1145 }
1146 }
1147 if (!varnames.empty()) {
1148 DBPutMultivar(dbfile, comp.str().c_str(), varnames.size(),
1149 &varnames[0], &vartypes[0], optList);
1150 }
1151 }
1152 }
1153 DBFreeOptlist(optList);
1154 #endif
1155 }
1156
1157 } // namespace weipa
1158

  ViewVC Help
Powered by ViewVC 1.1.26