/[escript]/trunk/weipa/src/EscriptDataset.cpp
Revision 6119 - Sun Apr 3 23:36:59 2016 UTC by caltinay
File size: 37278 byte(s)
merging trilinos branch to trunk.
We can now build with trilinos and use it instead of paso for single PDEs.
There are some more things to be done...

/*****************************************************************************
*
* Copyright (c) 2003-2016 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

#include <weipa/EscriptDataset.h>
#include <weipa/DataVar.h>
#include <weipa/ElementData.h>
#include <weipa/NodeData.h>
#if defined USE_FINLEY || defined USE_DUDLEY
#include <weipa/FinleyDomain.h>
#endif
#ifdef USE_RIPLEY
#include <weipa/RipleyDomain.h>
#endif
#ifdef USE_SPECKLEY
#include <weipa/SpeckleyDomain.h>
#endif

#ifndef VISIT_PLUGIN
#include <escript/Data.h>
#include <escript/FileWriter.h>
#ifdef USE_DUDLEY
#include <dudley/CppAdapter/MeshAdapter.h>
#endif
#ifdef USE_FINLEY
#include <finley/CppAdapter/MeshAdapter.h>
#endif
#ifdef USE_RIPLEY
#include <ripley/RipleyDomain.h>
#endif
#ifdef USE_SPECKLEY
#include <speckley/SpeckleyDomain.h>
#endif

using escript::FileWriter;
#endif

#include <cstring>
#include <iostream>
#include <numeric> // for std::accumulate
#include <sstream> // for std::ostringstream

#if USE_SILO
#include <silo.h>

#if WEIPA_HAVE_MPI
#include <pmpio.h>
const int NUM_SILO_FILES = 1; // number of Silo files to produce per save
#endif

#endif // USE_SILO

using namespace std;

namespace weipa {

const char* MESH_VARS = "mesh_vars/";

//
// Default constructor
//
EscriptDataset::EscriptDataset() :
    cycle(0),
    time(0.),
    externalDomain(false),
    wantsMeshVars(false),
    mpiRank(0),
    mpiSize(1)
{
}

//
// Constructor with communicator
//
#if WEIPA_HAVE_MPI
EscriptDataset::EscriptDataset(MPI_Comm comm) :
    cycle(0),
    time(0.),
    externalDomain(false),
    wantsMeshVars(false),
    mpiComm(comm)
{
    MPI_Comm_rank(mpiComm, &mpiRank);
    MPI_Comm_size(mpiComm, &mpiSize);
}
#endif

//
// Destructor
//
EscriptDataset::~EscriptDataset()
{
}

//
// Sets the domain using an escript domain instance.
//
bool EscriptDataset::setDomain(const escript::AbstractDomain* domain)
{
#ifndef VISIT_PLUGIN
    int myError = 0, gError;

    // fail if the domain has already been set
    if (domainChunks.size() > 0) {
        cerr << "Domain has already been set!" << endl;
        myError = 1;
    } else if (!domain) {
        cerr << "Domain is NULL!" << endl;
        myError = 1;
    } else {
#if WEIPA_HAVE_MPI
        mpiComm = domain->getMPIComm();
        mpiRank = domain->getMPIRank();
        mpiSize = domain->getMPISize();
#endif

        // this is here to allow using 'else if' for all selectively compiled
        // paths
        if (0) {
        }
#if USE_FINLEY
        else if (dynamic_cast<const finley::MeshAdapter*>(domain)) {
            DomainChunk_ptr dom(new FinleyDomain());
            if (dom->initFromEscript(domain)) {
                if (mpiSize > 1)
                    dom->reorderGhostZones(mpiRank);
                domainChunks.push_back(dom);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        }
#endif
#if USE_DUDLEY
        else if (dynamic_cast<const dudley::MeshAdapter*>(domain)) {
            DomainChunk_ptr dom(new FinleyDomain());
            if (dom->initFromEscript(domain)) {
                if (mpiSize > 1)
                    dom->reorderGhostZones(mpiRank);
                domainChunks.push_back(dom);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        }
#endif
#if USE_RIPLEY
        else if (dynamic_cast<const ripley::RipleyDomain*>(domain)) {
            DomainChunk_ptr dom(new RipleyDomain());
            if (dom->initFromEscript(domain)) {
                if (mpiSize > 1)
                    dom->reorderGhostZones(mpiRank);
                domainChunks.push_back(dom);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        }
#endif
#if USE_SPECKLEY
        else if (dynamic_cast<const speckley::SpeckleyDomain*>(domain)) {
            DomainChunk_ptr dom(new SpeckleyDomain());
            if (dom->initFromEscript(domain)) {
                if (mpiSize > 1)
                    dom->reorderGhostZones(mpiRank);
                domainChunks.push_back(dom);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        }
#endif
        else {
            cerr << "Unsupported domain type!" << endl;
            myError = 2;
        }
    }

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
#else
        gError = myError;
#endif
    } else {
        gError = myError;
    }

    if (gError>1) {
        domainChunks.clear();
    } else if (gError==0) {
        // Convert mesh data to variables
        convertMeshVariables();
    }
    return (gError==0);

#else // VISIT_PLUGIN
    return false;
#endif
}

//
//
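// Adds the given escript data object as a new variable with the given name
// and units, converting it using the local domain chunk.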
//
bool EscriptDataset::addData(escript::Data& data, const string name,
                             const string units)
{
#ifndef VISIT_PLUGIN
    bool success = true;

    // fail if no domain has been set
    if (domainChunks.size() == 0) {
        success = false;
    } else {
        // initialize variable
        VarInfo vi;
        vi.varName = name;
        vi.units = units;

        DataVar_ptr var(new DataVar(vi.varName));
        if (var->initFromEscript(data, domainChunks[0])) {
            vi.dataChunks.push_back(var);
            updateSampleDistribution(vi);
            vi.valid = true;
        } else {
            var.reset();
            vi.valid = false;
        }
        variables.push_back(vi);
    }
    return success;

#else // VISIT_PLUGIN
    return false;
#endif
}

//
//
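// Loads a domain from dump files matching the given pattern, then loads the
// listed variable files under the corresponding names.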
//
bool EscriptDataset::loadNetCDF(const string domainFilePattern,
                                const StringVec& varFiles,
                                const StringVec& varNames, int nChunks)
{
    // sanity check
    if (varFiles.size() != varNames.size()) {
        return false;
    }

    // load the domain files
    if (!loadDomain(domainFilePattern, nChunks)) {
        return false;
    }

    // load the variables
    StringVec::const_iterator fileIt = varFiles.begin();
    StringVec::const_iterator nameIt = varNames.begin();
    for (; fileIt != varFiles.end(); fileIt++, nameIt++) {
        loadData(*fileIt, *nameIt, "");
    }

    return true;
}

//
// Load only variables using provided domain
//
bool EscriptDataset::loadNetCDF(const DomainChunks& domain,
                                const StringVec& varFiles,
                                const StringVec& varNames)
{
    // sanity check
    if (varFiles.size() != varNames.size()) {
        return false;
    }

    // set the domain
    if (!setExternalDomain(domain)) {
        return false;
    }

    // load the variables
    StringVec::const_iterator fileIt = varFiles.begin();
    StringVec::const_iterator nameIt = varNames.begin();
    for (; fileIt != varFiles.end(); fileIt++, nameIt++) {
        loadData(*fileIt, *nameIt, "");
    }

    return true;
}

//
//
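// Saves the dataset in Silo format. If useMultiMesh is set, rank 0 also
// writes multimesh/multivar objects that tie the blocks together.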
//
bool EscriptDataset::saveSilo(string fileName, bool useMultiMesh)
{
#if USE_SILO
    if (domainChunks.size() == 0)
        return false;

    const char* blockDirFmt = "/block%04d";
    string siloPath;
    DBfile* dbfile = NULL;
    int driver = DB_HDF5; // prefer HDF5 if available
    //FIXME: Silo's HDF5 driver and NetCDF 4 are currently incompatible because
    //NetCDF calls H5close() when all its files are closed.
    //Unidata has been contacted, Ticket ID: YTC-894489.
    //When this issue is resolved, remove the following line.
    driver = DB_PDB;
#if WEIPA_HAVE_MPI
    PMPIO_baton_t* baton = NULL;
#endif

    if (fileName.length() < 6 || fileName.compare(fileName.length()-5, 5, ".silo") != 0) {
        fileName+=".silo";
    }

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        baton = PMPIO_Init(NUM_SILO_FILES, PMPIO_WRITE,
                    mpiComm, 0x1337, PMPIO_DefaultCreate, PMPIO_DefaultOpen,
                    PMPIO_DefaultClose, (void*)&driver);
        // try the fallback driver in case of error
        if (!baton && driver != DB_PDB) {
            driver = DB_PDB;
            baton = PMPIO_Init(NUM_SILO_FILES, PMPIO_WRITE,
                        mpiComm, 0x1337, PMPIO_DefaultCreate, PMPIO_DefaultOpen,
                        PMPIO_DefaultClose, (void*)&driver);
        }
        if (baton) {
            char str[64];
            snprintf(str, 64, blockDirFmt, PMPIO_RankInGroup(baton, mpiRank));
            siloPath = str;
            dbfile = (DBfile*) PMPIO_WaitForBaton(
                    baton, fileName.c_str(), siloPath.c_str());
        }
#endif
    } else {
        dbfile = DBCreate(fileName.c_str(), DB_CLOBBER, DB_LOCAL,
                "escriptData", driver);
        // try the fallback driver in case of error
        if (!dbfile && driver != DB_PDB) {
            driver = DB_PDB;
            dbfile = DBCreate(fileName.c_str(), DB_CLOBBER, DB_LOCAL,
                    "escriptData", driver);
        }
    }

    if (!dbfile) {
        cerr << "Could not create Silo file." << endl;
        if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
            PMPIO_HandOffBaton(baton, dbfile);
            PMPIO_Finish(baton);
#endif
        }
        return false;
    }

    if (driver==DB_HDF5) {
        // gzip level 1 already provides good compression with minimal
        // performance penalty. Some tests showed that gzip levels >3 performed
        // rather badly on escript data both in terms of time and space
        DBSetCompression("ERRMODE=FALLBACK METHOD=GZIP LEVEL=1");
    }

    DomainChunks::iterator domIt;
    VarVector::iterator viIt;
    int idx = 0;
    for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, idx++) {
        if (mpiSize == 1) {
            char str[64];
            snprintf(str, 64, blockDirFmt, idx);
            siloPath = str;
            DBMkdir(dbfile, siloPath.c_str());
        }
        // write block of the mesh if we don't use an external domain
        if (!externalDomain) {
            if (! (*domIt)->writeToSilo(
                        dbfile, siloPath, meshLabels, meshUnits, wantsMeshVars)) {
                cerr << "Error writing block " << idx
                     << " of mesh to Silo file!" << endl;
                break;
            }
        }

        // write variables for current domain chunk
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            // do not attempt to write this variable if previous steps failed
            if (!viIt->valid) continue;
            DataVar_ptr var = viIt->dataChunks[idx];
            if (!var->writeToSilo(dbfile, siloPath, viIt->units)) {
                cerr << "Error writing block " << idx << " of '"
                     << var->getName() << "' to Silo file!" << endl;
                viIt->valid = false;
            }
        }
    }

    // rank 0 writes additional data that describe how the parts fit together
    if (mpiRank == 0) {
        if (useMultiMesh) {
            const StringVec& meshNames = domainChunks[0]->getMeshNames();
            StringVec::const_iterator it;
            for (it = meshNames.begin(); it != meshNames.end(); it++)
                putSiloMultiMesh(dbfile, *it);

            if (wantsMeshVars) {
                DBMkdir(dbfile, MESH_VARS);
                for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++)
                    putSiloMultiVar(dbfile, *viIt, true);
            }

            for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
                if (!viIt->valid) continue;
                DataVar_ptr var = viIt->dataChunks[0];
                if (var->getRank() < 2)
                    putSiloMultiVar(dbfile, *viIt);
                else
                    putSiloMultiTensor(dbfile, *viIt);
            }
        }

        vector<char*> tensorNames;
        vector<string> tensorDefStrings;
        vector<char*> tensorDefs;

        // collect tensors for their Silo definitions
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            if (!viIt->valid) continue;
            DataVar_ptr var = viIt->dataChunks[0];
            if (var->getRank() == 2) {
                tensorDefStrings.push_back(var->getTensorDef());
                tensorNames.push_back((char*)var->getName().c_str());
            }
        }
        for (vector<string>::const_iterator sit=tensorDefStrings.begin();
             sit != tensorDefStrings.end(); sit++)
            tensorDefs.push_back((char*)sit->c_str());

        if (tensorDefs.size()) {
            DBSetDir(dbfile, "/");
            DBoptlist* optList = DBMakeOptlist(2);
            DBAddOption(optList, DBOPT_CYCLE, &cycle);
            DBAddOption(optList, DBOPT_DTIME, &time);
            vector<DBoptlist*> defOpts(tensorDefs.size(), optList);
            vector<int> defTypes(tensorDefs.size(), DB_VARTYPE_TENSOR);
            DBPutDefvars(dbfile, "tensors", tensorDefs.size(), &tensorNames[0],
                         &defTypes[0], &tensorDefs[0], &defOpts[0]);
            DBFreeOptlist(optList);
        }
    }

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        PMPIO_HandOffBaton(baton, dbfile);
        PMPIO_Finish(baton);
#endif
    } else {
        DBClose(dbfile);
    }

    return true;

#else // !USE_SILO
    std::cerr << "WARNING: saving to silo file requested but escript was not built"
                 " with silo support";
    return false;
#endif
}

//
//
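// Saves the dataset in VTK XML format, writing one .vtu file per mesh that
// has valid variables attached.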
//
bool EscriptDataset::saveVTK(string fileName)
{
    if (domainChunks.size() == 0)
        return false;

    map<string,VarVector> varsPerMesh;

    // determine meshes and variables to write
    VarVector::iterator viIt;
    for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
        // skip empty variable
        int numSamples = accumulate(viIt->sampleDistribution.begin(),
                                    viIt->sampleDistribution.end(), 0);
        if (numSamples == 0 || !viIt->valid) {
            continue;
        }
        string meshName = viIt->dataChunks[0]->getMeshName();

        // ranks with 0 samples can't know the correct mesh name.
        // We assume rank 0 always has samples, if this turns out to be a
        // wrong assumption then a bit more work is needed to get the correct
        // mesh name to all ranks.
#if WEIPA_HAVE_MPI
        if (mpiSize > 1) {
            char name[100];
            if (mpiRank == 0) {
                strncpy(&name[0], meshName.c_str(), 100);
            }

            MPI_Bcast(name, 100, MPI_CHAR, 0, mpiComm);
            meshName = name;
        }
#endif

        map<string,VarVector>::iterator it = varsPerMesh.find(meshName);
        if (it != varsPerMesh.end()) {
            it->second.push_back(*viIt);
        } else {
            VarVector v;
            v.push_back(*viIt);
            varsPerMesh[meshName] = v;
        }
    }

    if (fileName.length() < 5 || fileName.compare(fileName.length()-4, 4, ".vtu") != 0) {
        fileName+=".vtu";
    }

    bool ret = true;
    if (varsPerMesh.empty()) {
        // no valid variables so just write default mesh
        ret = saveVTKsingle(fileName, "Elements", VarVector());
    } else {
        // write one file per required mesh
        string newName(fileName);
        string filePrefix(fileName.substr(0, fileName.length()-4));
        bool prependMeshName=(varsPerMesh.size()>1);
        map<string,VarVector>::const_iterator vpmIt;
        for (vpmIt=varsPerMesh.begin(); vpmIt!=varsPerMesh.end(); vpmIt++) {
            if (prependMeshName) {
                newName=filePrefix+"_"+vpmIt->first+".vtu";
            }
            // attempt to write all files even if one fails
            if (!saveVTKsingle(newName, vpmIt->first, vpmIt->second)) {
                ret = false;
            }
        }
    }
    return ret;
}

//
//
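// Sets the axis labels used for the mesh (the z label is optional).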
//
void EscriptDataset::setMeshLabels(const string x, const string y, const string z)
{
    meshLabels.clear();
    meshLabels.push_back(x);
    meshLabels.push_back(y);
    if (z.length()>0)
        meshLabels.push_back(z);
}

//
//
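// Sets the axis units used for the mesh (the z unit is optional).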
//
void EscriptDataset::setMeshUnits(const string x, const string y, const string z)
{
    meshUnits.clear();
    meshUnits.push_back(x);
    meshUnits.push_back(y);
    if (z.length()>0)
        meshUnits.push_back(z);
}

//
//
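// Writes a single .vtu file containing the named mesh and the given variables.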
//
bool EscriptDataset::saveVTKsingle(const string& fileName,
                                   const string& meshName,
                                   const VarVector& vars)
{
#ifndef VISIT_PLUGIN
    VarVector nodalVars, cellVars;
    VarVector::const_iterator viIt;
    for (viIt = vars.begin(); viIt != vars.end(); viIt++) {
        const DataChunks& varChunks = viIt->dataChunks;
        if (varChunks[0]->isNodeCentered()) {
            nodalVars.push_back(*viIt);
        } else {
            cellVars.push_back(*viIt);
        }
    }

    if (wantsMeshVars) {
        // add mesh variables if requested
        for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++) {
            DataVar_ptr var = viIt->dataChunks[0];
            if (meshName == var->getMeshName()) {
                VarInfo vi = *viIt;
                vi.varName = string(MESH_VARS)+vi.varName;
                if (var->isNodeCentered()) {
                    nodalVars.push_back(vi);
                } else {
                    cellVars.push_back(vi);
                }
            }
        }
    }

    DomainChunks::iterator domIt;
    int gNumPoints;
    int gNumCells = 0;
    int gCellSizeAndType[2] = { 0, 0 };

    boost::scoped_ptr<FileWriter> fw(NULL);

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        fw.reset(new FileWriter(mpiComm));
        domainChunks[0]->removeGhostZones(mpiRank);
        ElementData_ptr elements = domainChunks[0]->getElementsByName(meshName);
        int myNumCells = 0;
        if (elements) {
            myNumCells = elements->getNumElements();
        }
        MPI_Reduce(&myNumCells, &gNumCells, 1, MPI_INT, MPI_SUM, 0, mpiComm);

        int myCellSizeAndType[2] = { 0, 0 };
        if (elements) {
            myCellSizeAndType[0] = elements->getNodesPerElement();
            myCellSizeAndType[1] = elements->getType();
        }

        // rank 0 needs to know element type and size but it's possible that
        // this information is only available on other ranks (value=0) so
        // retrieve it
        MPI_Reduce(&myCellSizeAndType, &gCellSizeAndType, 2, MPI_INT, MPI_MAX,
                   0, mpiComm);
#endif
    } else {
        fw.reset(new FileWriter());
        int idx = 0;
        for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, idx++) {
            if (domainChunks.size() > 1)
                (*domIt)->removeGhostZones(idx);
            ElementData_ptr elements = (*domIt)->getElementsByName(meshName);
            gNumCells += elements->getNumElements();
            if (gCellSizeAndType[0] == 0)
                gCellSizeAndType[0] = elements->getNodesPerElement();
            if (gCellSizeAndType[1] == 0)
                gCellSizeAndType[1] = elements->getType();
        }
    }

    gNumPoints = domainChunks[0]->getNodes()->getGlobalNumNodes();

    ostringstream oss;
    oss.setf(ios_base::scientific, ios_base::floatfield);

    if (!fw->openFile(fileName)) {
        return false;
    }

    if (mpiRank == 0) {
#ifdef _DEBUG
        cout << meshName << ": pts=" << gNumPoints << ", cells=" << gNumCells
             << ", cellsize=" << gCellSizeAndType[0] << ", type="
             << gCellSizeAndType[1] << ", numNodalVars=" << nodalVars.size()
             << ", numCellVars=" << cellVars.size()
             << endl;
#endif

        oss << "<?xml version=\"1.0\"?>" << endl;
        oss << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\"";
        if (mdSchema.length()>0) {
            oss << " " << mdSchema;
        }
        oss << ">" << endl;
        if (mdString.length()>0) {
            oss << "<MetaData>" << endl << mdString << endl
                << "</MetaData>" << endl;
        }
        oss << "<UnstructuredGrid>" << endl;

        // write time and cycle values
        oss << "<FieldData>" << endl;
        oss << "<DataArray Name=\"TIME\" type=\"Float64\" format=\"ascii\" NumberOfTuples=\"1\">" << endl;
        oss << time << endl;
        oss << "</DataArray>" << endl << "</FieldData>" << endl;
        oss << "<FieldData>" << endl;
        oss << "<DataArray Name=\"CYCLE\" type=\"Int32\" format=\"ascii\" NumberOfTuples=\"1\">" << endl;
        oss << cycle << endl;
        oss << "</DataArray>" << endl << "</FieldData>" << endl;

        // write mesh header
        oss << "<Piece NumberOfPoints=\"" << gNumPoints
            << "\" NumberOfCells=\"" << gNumCells << "\">" << endl;
        oss << "<Points>" << endl;
        oss << "<DataArray NumberOfComponents=\"3\" type=\"Float64\" format=\"ascii\">" << endl;
    }

    //
    // coordinates (note that we are using the original nodes)
    //
    int blockNum = (mpiSize>1 ? mpiRank : 0);
    for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, blockNum++) {
        (*domIt)->getNodes()->writeCoordinatesVTK(oss, blockNum);
    }

    if (!fw->writeOrdered(oss)) {
        cerr << "Warning, ignoring file write error!" << endl;
    }

    if (mpiRank == 0) {
        oss << "</DataArray>" << endl << "</Points>" << endl;
        oss << "<Cells>" << endl;
        oss << "<DataArray Name=\"connectivity\" type=\"Int32\" format=\"ascii\">" << endl;
    }

    //
    // connectivity
    //
    for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++) {
        ElementData_ptr el = (*domIt)->getElementsByName(meshName);
        if (el) {
            el->writeConnectivityVTK(oss);
        }
    }

    if (!fw->writeOrdered(oss)) {
        cerr << "Warning, ignoring file write error!" << endl;
    }

    //
    // offsets & types
    //
    if (mpiRank == 0) {
        oss << "</DataArray>" << endl;
        oss << "<DataArray Name=\"offsets\" type=\"Int32\" format=\"ascii\">" << endl;
        for (int i=1; i < gNumCells+1; i++) {
            oss << i*gCellSizeAndType[0] << endl;
        }
        oss << "</DataArray>" << endl;
        oss << "<DataArray Name=\"types\" type=\"UInt8\" format=\"ascii\">" << endl;
        for (int i=1; i < gNumCells+1; i++) {
            oss << gCellSizeAndType[1] << endl;
        }
        oss << "</DataArray>" << endl << "</Cells>" << endl;
        if (!fw->writeShared(oss)) {
            cerr << "Warning, ignoring file write error!" << endl;
        }
    }

    // now write all variables - first the nodal data, then cell data

    // write nodal data if any
    if (!nodalVars.empty()) {
        if (mpiRank == 0)
            oss << "<PointData>" << endl;
        for (viIt = nodalVars.begin(); viIt != nodalVars.end(); viIt++) {
            writeVarToVTK(*viIt, oss);
            if (!fw->writeOrdered(oss)) {
                cerr << "Warning, ignoring file write error!" << endl;
            }
            if (mpiRank == 0)
                oss << "</DataArray>" << endl;
        }
        if (mpiRank == 0)
            oss << "</PointData>" << endl;
    }

    // write cell data if any
    if (!cellVars.empty()) {
        if (mpiRank == 0)
            oss << "<CellData>" << endl;
        for (viIt = cellVars.begin(); viIt != cellVars.end(); viIt++) {
            writeVarToVTK(*viIt, oss);
            if (!fw->writeOrdered(oss)) {
                cerr << "Warning, ignoring file write error!" << endl;
            }
            if (mpiRank == 0)
                oss << "</DataArray>" << endl;
        }
        if (mpiRank == 0)
            oss << "</CellData>" << endl;
    }

    if (mpiRank == 0) {
        oss << "</Piece>" << endl << "</UnstructuredGrid>" << endl
            << "</VTKFile>" << endl;
        if (!fw->writeShared(oss)) {
            cerr << "Warning, ignoring file write error!" << endl;
        }
    }

    fw->close();
    return true;
#else // VISIT_PLUGIN
    return false;
#endif
}

//
//
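// Writes all locally held chunks of one variable as a VTK DataArray to the
// given stream.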
//
void EscriptDataset::writeVarToVTK(const VarInfo& varInfo, ostream& os)
{
    const DataChunks& varChunks = varInfo.dataChunks;
    int rank = varChunks[0]->getRank();
    int numComps = 1;
    if (rank > 0)
        numComps *= 3;
    if (rank > 1)
        numComps *= 3;

    if (mpiRank == 0) {
        os << "<DataArray Name=\"" << varInfo.varName
           << "\" type=\"Float64\" NumberOfComponents=\"" << numComps
           << "\" format=\"ascii\">" << endl;
    }

    // this is required in case we read a dataset with more than one chunk on
    // one rank
    int blockNum = (mpiSize>1 ? mpiRank : 0);
    DataChunks::const_iterator blockIt;
    for (blockIt = varChunks.begin(); blockIt != varChunks.end(); blockIt++, blockNum++) {
        (*blockIt)->writeToVTK(os, blockNum);
    }
}

//
// Sets the domain from dump files.
//
bool EscriptDataset::loadDomain(const string filePattern, int nChunks)
{
    int myError = 0, gError;

    if (mpiSize > 1 && nChunks != mpiSize) {
        cerr << "Cannot load " << nChunks << " chunks on " << mpiSize
             << " MPI ranks!" << endl;
        myError = 1;

    } else if (domainChunks.size() > 0) {
        cerr << "Domain has already been set!" << endl;
        myError = 1;

    } else {
#ifdef USE_FINLEY
        char* str = new char[filePattern.length()+10];
        // FIXME: This assumes a finley domain!
        if (mpiSize > 1) {
            DomainChunk_ptr chunk(new FinleyDomain());
            sprintf(str, filePattern.c_str(), mpiRank);
            string domainFile = str;
            if (chunk->initFromFile(domainFile)) {
                chunk->reorderGhostZones(mpiRank);
                domainChunks.push_back(chunk);
            } else {
                cerr << "Error initializing domain!" << endl;
                myError = 2;
            }
        } else {
            for (int idx=0; idx < nChunks; idx++) {
                DomainChunk_ptr chunk(new FinleyDomain());
                sprintf(str, filePattern.c_str(), idx);
                string domainFile = str;
                if (chunk->initFromFile(domainFile)) {
                    if (nChunks > 1)
                        chunk->reorderGhostZones(idx);
                    domainChunks.push_back(chunk);
                } else {
                    cerr << "Error initializing domain block " << idx << endl;
                    myError = 2;
                    break;
                }
            }
        }
        delete[] str;
#else // USE_FINLEY
        cerr << "weipa not compiled with finley support." << endl;
        myError = 3;
#endif
    }

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
#else
        gError = myError;
#endif
    } else {
        gError = myError;
    }

    if (gError>1) {
        domainChunks.clear();
    } else if (gError==0) {
        // Convert mesh data to variables
        convertMeshVariables();
    }
    return (gError==0);
}

//
// Sets an already converted domain.
//
bool EscriptDataset::setExternalDomain(const DomainChunks& domain)
{
    int myError = 0, gError;

    if (mpiSize > 1 && domain.size() > 1) {
        cerr << "Can only add one domain block per rank when using MPI!"
             << endl;
        myError = 1;

    } else if (domainChunks.size() > 0) {
        cerr << "Domain has already been set!" << endl;
        myError = 1;

    }

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
#else
        gError = myError;
#endif
    } else {
        gError = myError;
    }

    if (!gError) {
        externalDomain = true;
        domainChunks = domain;
    }

    return !gError;
}

//
//
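// Reads all chunks of a variable from dump files matching the pattern and
// adds it under the given name and units.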
//
bool EscriptDataset::loadData(const string filePattern, const string name,
                              const string units)
{
    int myError = 0, gError;

    // fail if no domain has been set
    if (domainChunks.size() == 0) {
        gError = 1;

    } else {
        // initialize variable
        VarInfo vi;
        vi.varName = name;
        vi.units = units;
        vi.valid = true;
        char* str = new char[filePattern.length()+10];

        // read all parts of the variable
        DomainChunks::iterator domIt;
        int idx = (mpiSize > 1) ? mpiRank : 0;
        for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++, idx++) {
            sprintf(str, filePattern.c_str(), idx);
            string dfile = str;
            DataVar_ptr var(new DataVar(name));
            if (var->initFromFile(dfile, *domIt))
                vi.dataChunks.push_back(var);
            else {
                cerr << "Error reading " << dfile << endl;
                myError = 1;
                break;
            }
        }
        delete[] str;

        if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
            MPI_Allreduce(&myError, &gError, 1, MPI_INT, MPI_MAX, mpiComm);
#else
            gError = myError;
#endif
        } else {
            gError = myError;
        }

        if (!gError) {
            // only add variable if all chunks have been read without error
            updateSampleDistribution(vi);
            variables.push_back(vi);
        }
    }

    return !gError;
}

//
//
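// Collects the variables exported by the domain chunks themselves (mesh data)
// into meshVariables.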
//
void EscriptDataset::convertMeshVariables()
{
    const StringVec& varNames = domainChunks[0]->getVarNames();
    StringVec::const_iterator it;
    for (it = varNames.begin(); it != varNames.end(); it++) {
        VarInfo vi;
        vi.varName = *it;
        vi.valid = true;
        // get all parts of current variable
        DomainChunks::iterator domIt;
        for (domIt = domainChunks.begin(); domIt != domainChunks.end(); domIt++) {
            DataVar_ptr var = (*domIt)->getDataVarByName(*it);
            if (var != NULL) {
                vi.dataChunks.push_back(var);
            } else {
                cerr << "Error converting mesh variable " << *it << endl;
                vi.valid = false;
                break;
            }
        }
        updateSampleDistribution(vi);
        meshVariables.push_back(vi);
    }
}

// Retrieves the number of samples at each block - used to determine which
// blocks contribute to a given variable, if any.
void EscriptDataset::updateSampleDistribution(VarInfo& vi)
{
    IntVec sampleDist;
    const DataChunks& varChunks = vi.dataChunks;

    if (mpiSize > 1) {
#if WEIPA_HAVE_MPI
        int myNumSamples = varChunks[0]->getNumberOfSamples();
        sampleDist.insert(sampleDist.end(), mpiSize, 0);
        MPI_Allgather(
            &myNumSamples, 1, MPI_INT, &sampleDist[0], 1, MPI_INT, mpiComm);
#endif
    } else {
        DataChunks::const_iterator it;
        for (it = varChunks.begin(); it != varChunks.end(); it++) {
            sampleDist.push_back((*it)->getNumberOfSamples());
        }
    }
    vi.sampleDistribution = sampleDist;
}

//
//
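// Writes a Silo multimesh object linking the blocks of the named mesh that
// actually contain samples.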
//
void EscriptDataset::putSiloMultiMesh(DBfile* dbfile, const string& meshName)
{
#if USE_SILO
    vector<int> meshtypes;
    vector<string> tempstrings;
    vector<char*> meshnames;
    string pathPrefix;

    int ppIndex = domainChunks[0]->getSiloPath().find(':');
    if (ppIndex != string::npos) {
        pathPrefix = domainChunks[0]->getSiloPath().substr(0, ppIndex+1);
    }

    // find a variable that is defined on this mesh to get the sample
    // distribution (which tells us which ranks contribute to this mesh).
    // Try mesh variables first, then regular ones.
    VarVector::const_iterator viIt;
    for (viIt = meshVariables.begin(); viIt != meshVariables.end(); viIt++) {
        if (meshName == viIt->dataChunks[0]->getMeshName())
            break;
    }

    if (viIt == meshVariables.end()) {
        for (viIt = variables.begin(); viIt != variables.end(); viIt++) {
            if (meshName == viIt->dataChunks[0]->getMeshName())
                break;
        }
        // this probably means that the mesh is empty
        if (viIt == variables.end()) {
            return;
        }
    }

    for (size_t idx = 0; idx < viIt->sampleDistribution.size(); idx++) {
        if (viIt->sampleDistribution[idx] > 0) {
            stringstream siloPath;
            siloPath << pathPrefix << "/block";
            int prevWidth = siloPath.width(4);
            char prevFill = siloPath.fill('0');
            siloPath << right << idx;
            siloPath.width(prevWidth);
            siloPath.fill(prevFill);
            siloPath << "/";
            siloPath << meshName;
            tempstrings.push_back(siloPath.str());
            meshtypes.push_back(DB_UCDMESH);
        }
    }
    for (vector<string>::const_iterator sit=tempstrings.begin();
         sit != tempstrings.end(); sit++)
        meshnames.push_back((char*)sit->c_str());

    // ignore empty mesh
    if (!meshnames.empty()) {
        DBSetDir(dbfile, "/");
        DBoptlist* optList = DBMakeOptlist(2);
        DBAddOption(optList, DBOPT_CYCLE, &cycle);
        DBAddOption(optList, DBOPT_DTIME, &time);
        DBPutMultimesh(dbfile, meshName.c_str(), meshnames.size(),
                       &meshnames[0], &meshtypes[0], optList);
        DBFreeOptlist(optList);
    }
#endif
}

//
//
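// Writes a Silo multivar object linking the blocks of a variable; with
// useMeshFile set it is placed under the mesh_vars/ directory.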
//
void EscriptDataset::putSiloMultiVar(DBfile* dbfile, const VarInfo& vi,
                                     bool useMeshFile)
{
#if USE_SILO
    vector<int> vartypes;
    vector<string> tempstrings;
    vector<char*> varnames;
    string pathPrefix;
    if (useMeshFile) {
        int ppIndex = domainChunks[0]->getSiloPath().find(':');
        if (ppIndex != string::npos) {
            pathPrefix = domainChunks[0]->getSiloPath().substr(0, ppIndex+1);
        }
    }

    for (size_t idx = 0; idx < vi.sampleDistribution.size(); idx++) {
        if (vi.sampleDistribution[idx] > 0) {
            stringstream siloPath;
            siloPath << pathPrefix << "/block";
            int prevWidth = siloPath.width(4);
            char prevFill = siloPath.fill('0');
            siloPath << right << idx;
            siloPath.width(prevWidth);
            siloPath.fill(prevFill);
            siloPath << "/";
            siloPath << vi.varName;
            tempstrings.push_back(siloPath.str());
            vartypes.push_back(DB_UCDVAR);
        }
    }
    for (vector<string>::const_iterator sit=tempstrings.begin();
         sit != tempstrings.end(); sit++)
        varnames.push_back((char*)sit->c_str());

    // ignore empty variables
    if (!varnames.empty()) {
        DBSetDir(dbfile, "/");
        DBoptlist* optList = DBMakeOptlist(2);
        DBAddOption(optList, DBOPT_CYCLE, &cycle);
        DBAddOption(optList, DBOPT_DTIME, &time);
        if (useMeshFile) {
            string vpath = string(MESH_VARS)+vi.varName;
            DBPutMultivar(dbfile, vpath.c_str(), varnames.size(),
                          &varnames[0], &vartypes[0], optList);
        } else {
            DBPutMultivar(dbfile, vi.varName.c_str(), varnames.size(),
                          &varnames[0], &vartypes[0], optList);
        }
        DBFreeOptlist(optList);
    }
#endif
}

//
//
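// Writes hidden Silo multivar objects for the individual components of a
// rank-2 (tensor) variable.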
//
void EscriptDataset::putSiloMultiTensor(DBfile* dbfile, const VarInfo& vi)
{
#if USE_SILO
    string tensorDir = vi.varName+string("_comps/");
    DBSetDir(dbfile, "/");
    DBMkdir(dbfile, tensorDir.c_str());
    int one = 1;
    DBoptlist* optList = DBMakeOptlist(3);
    DBAddOption(optList, DBOPT_CYCLE, &cycle);
    DBAddOption(optList, DBOPT_DTIME, &time);
    DBAddOption(optList, DBOPT_HIDE_FROM_GUI, &one);
    const IntVec& shape = vi.dataChunks[0]->getShape();

    for (int i=0; i<shape[1]; i++) {
        for (int j=0; j<shape[0]; j++) {
            vector<string> tempstrings;
            vector<char*> varnames;
            vector<int> vartypes;
            stringstream comp;
            comp << vi.varName << "_comps/a_";
            comp << i;
            comp << j;
            for (size_t idx = 0; idx < vi.sampleDistribution.size(); idx++) {
                if (vi.sampleDistribution[idx] > 0) {
                    stringstream siloPath;
                    siloPath << "/block";
                    int prevWidth = siloPath.width(4);
                    char prevFill = siloPath.fill('0');
                    siloPath << right << idx;
                    siloPath.width(prevWidth);
                    siloPath.fill(prevFill);
                    siloPath << "/" << comp.str();
                    tempstrings.push_back(siloPath.str());
                    varnames.push_back((char*)tempstrings.back().c_str());
                    vartypes.push_back(DB_UCDVAR);
                }
            }
            if (!varnames.empty()) {
                DBPutMultivar(dbfile, comp.str().c_str(), varnames.size(),
                              &varnames[0], &vartypes[0], optList);
            }
        }
    }
    DBFreeOptlist(optList);
#endif
}

} // namespace weipa
