
Contents of /trunk/finley/src/Mesh_read.cpp



Revision 6939
Mon Jan 20 03:37:18 2020 UTC by uqaeller
File size: 14290 byte(s)
Log message: Updated the copyright header.



/*****************************************************************************
*
* Copyright (c) 2003-2020 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014-2017 by Centre for Geoscience Computing (GeoComp)
* Development from 2019 by School of Earth and Environmental Sciences
**
*****************************************************************************/

#include "FinleyDomain.h"

#include <escript/index.h>

using escript::IOError;

namespace {

using namespace finley;

ElementFile* readElementFile(std::ifstream& fileHandle, int order,
                             int reducedOrder, escript::JMPI mpiInfo)
{
    dim_t numEle = 0;
    ElementTypeId typeID = NoRef;
    std::string elementType, line;

    // Read the element typeID and number of elements
    if (mpiInfo->rank == 0) {
        std::getline(fileHandle, line);
        if (!fileHandle.good())
            throw IOError("Mesh::read: Scan error while reading file - expected <ElementType> <numEle>");
        size_t pos = line.find(' ');
        if (pos == std::string::npos)
            throw IOError("Mesh::read: Scan error reading file - expected <ElementType> <numEle>");
        elementType = line.substr(0, pos);
        numEle = std::stol(line.substr(pos+1));
        typeID = ReferenceElement::getTypeId(elementType.c_str());
    }
#ifdef ESYS_MPI
    if (mpiInfo->size > 1) {
        dim_t temp1[2];
        temp1[0] = (dim_t)typeID;
        temp1[1] = numEle;
        int mpiError = MPI_Bcast(temp1, 2, MPI_DIM_T, 0, mpiInfo->comm);
        if (mpiError != MPI_SUCCESS) {
            throw FinleyException("Mesh::read: broadcast of element typeID failed");
        }
        typeID = static_cast<ElementTypeId>(temp1[0]);
        numEle = temp1[1];
    }
#endif
    if (typeID == NoRef) {
        std::stringstream ss;
        ss << "Mesh::read: Unidentified element type " << elementType;
        throw IOError(ss.str());
    }

    // Allocate the ElementFile
    const_ReferenceElementSet_ptr refElements(new ReferenceElementSet(
                typeID, order, reducedOrder));
    ElementFile* out = new ElementFile(refElements, mpiInfo);
    const int numNodes = out->numNodes;

    /********************** Read the element data **************************/
    dim_t chunkSize = numEle / mpiInfo->size + 1;
    dim_t totalEle = 0;
    dim_t chunkEle = 0;
    int nextCPU = 1;
    /// Store Id + Tag + node list (+ one int at end for chunkEle)
    index_t* tempInts = new index_t[chunkSize * (2 + numNodes) + 1];
    // Elements are specified as a list of integers...only need one message
    // instead of two as with the nodes
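    // Message layout: element i0 occupies the (2 + numNodes) entries starting
    // at tempInts[i0 * (2 + numNodes)], in the order
    //     Id, Tag, node_0, ..., node_{numNodes-1};
    // the extra trailing entry tempInts[chunkSize * (2 + numNodes)] carries
    // the number of elements actually present in the chunk (chunkEle).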
    if (mpiInfo->rank == 0) { // Master
        for (;;) { // Infinite loop
            #pragma omp parallel for
            for (index_t i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                tempInts[i0] = -1;
            chunkEle = 0;
            for (index_t i0 = 0; i0 < chunkSize; i0++) {
                if (totalEle >= numEle)
                    break; // End inner loop
                std::getline(fileHandle, line);
                if (!fileHandle.good())
                    throw IOError("Mesh::read: Scan error while reading element data");
                std::stringstream ss;
                ss << line;
                ss >> tempInts[i0 * (2 + numNodes) + 0]
                   >> tempInts[i0 * (2 + numNodes) + 1];
                for (int i1 = 0; i1 < numNodes; i1++) {
                    ss >> tempInts[i0 * (2 + numNodes) + 2 + i1];
                }
                totalEle++;
                chunkEle++;
            }
#ifdef ESYS_MPI
            // Eventually we'll send chunk of elements to each CPU except 0
            // itself, here goes one of them
            if (nextCPU < mpiInfo->size) {
                tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_DIM_T,
                         nextCPU, 81722, mpiInfo->comm);
            }
#endif
            nextCPU++;
            // Infinite loop ends when I've read a chunk for each of the worker
            // nodes plus one more chunk for the master
            if (nextCPU > mpiInfo->size)
                break; // End infinite loop
        } // Infinite loop
    } // end master
    else { // Worker
#ifdef ESYS_MPI
        // Each worker receives one message
        MPI_Status status;
        MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_DIM_T, 0,
                 81722, mpiInfo->comm, &status);
        chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
    } // Worker

    out->allocTable(chunkEle);

    // Copy Element data from tempInts to element file
    out->minColor = 0;
    out->maxColor = chunkEle - 1;
    #pragma omp parallel for
    for (index_t i0 = 0; i0 < chunkEle; i0++) {
        out->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
        out->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
        out->Owner[i0] = mpiInfo->rank;
        out->Color[i0] = i0;
        for (int i1 = 0; i1 < numNodes; i1++) {
            out->Nodes[INDEX2(i1, i0, numNodes)] =
                    tempInts[i0 * (2 + numNodes) + 2 + i1];
        }
    }
    delete[] tempInts;
    return out;
}

} // anonymous

namespace finley {

escript::Domain_ptr FinleyDomain::read(escript::JMPI mpiInfo,
                                       const std::string& filename,
                                       int order, int reducedOrder,
                                       bool optimize)
{
    dim_t numNodes = 0;
    int numDim = 0;
    std::string name, line, token;
    std::ifstream fileHandle;

    if (mpiInfo->rank == 0) {
        // open file
        fileHandle.open(filename.c_str());
        if (!fileHandle.good()) {
            std::stringstream ss;
            ss << "Mesh::read: Opening file " << filename
               << " for reading failed.";
            throw IOError(ss.str());
        }

        // read header
        std::getline(fileHandle, name);
        if (!fileHandle.good())
            throw IOError("Mesh::read: Scan error while reading file header");

        // get the number of dimensions and nodes
        std::getline(fileHandle, line);
        if (!fileHandle.good())
            throw IOError("Mesh::read: Scan error while reading file - expected <?D-Nodes> <numNodes>");
        numDim = std::stoi(line.substr(0, 1));
        token = line.substr(line.find(' ')+1);
        numNodes = std::stoi(token);
    }

#ifdef ESYS_MPI
    // MPI Broadcast numDim, numNodes, name if there are multiple MPI procs
    if (mpiInfo->size > 1) {
        dim_t temp1[3];
        if (mpiInfo->rank == 0) {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = name.length() + 1;
        } else {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast(temp1, 3, MPI_DIM_T, 0, mpiInfo->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        name.resize(temp1[2]);
        MPI_Bcast(&name[0], temp1[2], MPI_CHAR, 0, mpiInfo->comm);
    }
#endif

    // allocate domain
    FinleyDomain* domain = new FinleyDomain(name, numDim, mpiInfo);

    // Each CPU will get at most chunkSize nodes so the message has to be
    // sufficiently large
    dim_t chunkSize = numNodes / mpiInfo->size + 1;
    dim_t totalNodes = 0;
    dim_t chunkNodes = 0;
    int nextCPU = 1;
    // Stores the integer message data
    index_t* tempInts = new index_t[chunkSize * 3 + 1];
    // Stores the double message data
    double* tempCoords = new double[chunkSize * numDim];

    // Read chunkSize nodes, send it in a chunk to worker CPU which copies
    // chunk into its local domain. It doesn't matter that a CPU has the wrong
    // nodes for its elements, this is sorted out later. First chunk sent to
    // CPU 1, second to CPU 2, ..., last chunk stays on CPU 0 (the master).
    // The three columns of integers (Id, gDOF, Tag) are gathered into a single
    // array tempInts and sent together in a single MPI message.
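    // Message layout for a node chunk:
    //     tempInts[0 .. chunkSize-1]               node Id
    //     tempInts[chunkSize .. 2*chunkSize-1]     global degree of freedom
    //     tempInts[2*chunkSize .. 3*chunkSize-1]   node Tag
    //     tempInts[3*chunkSize]                    chunkNodes (actual count)
    // while tempCoords stores the numDim coordinates of node i1 contiguously
    // starting at tempCoords[i1 * numDim].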
    if (mpiInfo->rank == 0) { // Master
        for (;;) { // Infinite loop
            #pragma omp parallel for
            for (index_t i0 = 0; i0 < chunkSize * 3 + 1; i0++)
                tempInts[i0] = -1;

            #pragma omp parallel for
            for (index_t i0 = 0; i0 < chunkSize * numDim; i0++)
                tempCoords[i0] = -1.0;

            chunkNodes = 0;
            for (index_t i1 = 0; i1 < chunkSize; i1++) {
                if (totalNodes >= numNodes)
                    break; // End of inner loop
                std::getline(fileHandle, line);
                if (!fileHandle.good())
                    throw IOError("Mesh::read: Scan error while reading node data");
                std::stringstream ss;
                ss << line;
                ss >> tempInts[0 + i1] >> tempInts[chunkSize + i1]
                   >> tempInts[chunkSize * 2 + i1];
                ss >> tempCoords[i1 * numDim];
                if (numDim > 1)
                    ss >> tempCoords[i1 * numDim + 1];
                if (numDim > 2)
                    ss >> tempCoords[i1 * numDim + 2];
                totalNodes++; // When do we quit the infinite loop?
                chunkNodes++; // How many nodes do we actually have in this chunk? It may be smaller than chunkSize.
            }
            if (chunkNodes > chunkSize) {
                throw FinleyException("Mesh::read: error reading chunks of domain, data too large for message size");
            }
#ifdef ESYS_MPI
            // Eventually we'll send chunkSize nodes to each CPU numbered
            // 1 ... mpiInfo->size-1, here goes one of them
            if (nextCPU < mpiInfo->size) {
                // The message has one more int to send chunkNodes
                tempInts[chunkSize * 3] = chunkNodes;
                MPI_Send(tempInts, chunkSize * 3 + 1, MPI_DIM_T, nextCPU, 81720, mpiInfo->comm);
                MPI_Send(tempCoords, chunkSize * numDim, MPI_DOUBLE, nextCPU, 81721, mpiInfo->comm);
            }
#endif
            nextCPU++;
            // Infinite loop ends when I've read a chunk for each of the worker
            // nodes plus one more chunk for the master
            if (nextCPU > mpiInfo->size)
                break; // End infinite loop
        } // Infinite loop
    } // End master
    else { // Worker
#ifdef ESYS_MPI
        // Each worker receives two messages
        MPI_Status status;
        MPI_Recv(tempInts, chunkSize * 3 + 1, MPI_DIM_T, 0, 81720, mpiInfo->comm, &status);
        MPI_Recv(tempCoords, chunkSize * numDim, MPI_DOUBLE, 0, 81721, mpiInfo->comm, &status);
        // How many nodes are in this worker's chunk?
        chunkNodes = tempInts[chunkSize * 3];
#endif
    } // Worker

    // Copy node data from tempMem to domain
    NodeFile* nodes = domain->getNodes();
    nodes->allocTable(chunkNodes);

    #pragma omp parallel for
    for (index_t i0 = 0; i0 < chunkNodes; i0++) {
        nodes->Id[i0] = tempInts[0 + i0];
        nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize + i0];
        nodes->Tag[i0] = tempInts[chunkSize * 2 + i0];
        for (int i1 = 0; i1 < numDim; i1++) {
            nodes->Coordinates[INDEX2(i1, i0, numDim)] = tempCoords[i0 * numDim + i1];
        }
    }
    delete[] tempInts;
    delete[] tempCoords;

    /*************************** read elements ******************************/
    domain->setElements(readElementFile(fileHandle, order, reducedOrder, mpiInfo));

    /************************ read face elements ****************************/
    domain->setFaceElements(readElementFile(fileHandle, order, reducedOrder, mpiInfo));

    /************************ read contact elements ****************************/
    domain->setContactElements(readElementFile(fileHandle, order, reducedOrder, mpiInfo));

    /************************ read nodal elements ***************************/
    domain->setPoints(readElementFile(fileHandle, order, reducedOrder, mpiInfo));

    /************************ get the name tags ****************************/
    std::string remainder;
    size_t len = 0;
    int tag_key;
    if (mpiInfo->rank == 0) { // Master
        // Read the word 'Tags'
        if (!fileHandle.eof()) {
            std::getline(fileHandle, name);
            if (!fileHandle.good())
                throw IOError("Mesh::read: Scan error while reading tag header");
        }
        // Read rest of file in one chunk, after using seek to find length
        std::ios::pos_type cur_pos = fileHandle.tellg();
        fileHandle.seekg(0, std::ios::end);
        std::ios::pos_type end_pos = fileHandle.tellg();
        fileHandle.seekg(cur_pos);
        remainder.resize(end_pos - cur_pos + 1);
        if (!fileHandle.eof()) {
            fileHandle.read(&remainder[0], end_pos-cur_pos);
            if (fileHandle.bad())
                throw IOError("Mesh::read: Error reading remainder");
            remainder[end_pos - cur_pos] = 0;
        }
        len = remainder.find_last_not_of(' ');
        remainder = remainder.substr(0, len+1);
    } // Master

#ifdef ESYS_MPI
    int len_i = static_cast<int>(len);
    MPI_Bcast(&len_i, 1, MPI_INT, 0, mpiInfo->comm);
    len = static_cast<size_t>(len_i);
    if (mpiInfo->rank != 0) {
        remainder.resize(len + 1);
    }
    if (MPI_Bcast(&remainder[0], len+1, MPI_CHAR, 0, mpiInfo->comm) != MPI_SUCCESS)
        throw FinleyException("Mesh::read: broadcast of remainder failed");
#endif

    std::stringstream rem;
    rem << remainder;
    while (std::getline(rem, line)) {
        size_t pos = line.find(' ');
        if (pos != std::string::npos) {
            name = line.substr(0, pos);
            tag_key = std::stoi(line.substr(pos+1));
            domain->setTagMap(name, tag_key);
        }
    }

    // close file
    if (mpiInfo->rank == 0)
        fileHandle.close();

    domain->resolveNodeIds();
    domain->prepare(optimize);
    return domain->getPtr();
}

} // namespace finley

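For reference, the input layout expected by FinleyDomain::read() can be reconstructed from the parsing above: a mesh name line, a "<dim>D-Nodes <numNodes>" line, one record per node (Id, global DOF, Tag, then numDim coordinates), four element sections (interior, face, contact and point elements) each headed by "<ElementType> <numEle>" and followed by "Id Tag node-list" records, and finally a "Tags" section of "<name> <tag_key>" pairs. The sketch below writes such a minimal file; it is an illustration only, and the element type names used (Tri3, Line2, Line2_Contact, Point1) are assumed to be among those ReferenceElement::getTypeId() accepts.

// Sketch: emit a tiny 2D mesh in the layout parsed by FinleyDomain::read().
// The element type names are assumptions, not taken from the file above.
#include <fstream>

int main()
{
    std::ofstream fly("example.fly");
    fly << "example_mesh\n";           // header line, read into 'name'
    fly << "2D-Nodes 4\n";             // numDim = 2, numNodes = 4
    // node records: Id gDOF Tag x y
    fly << "0 0 0 0.0 0.0\n"
        << "1 1 0 1.0 0.0\n"
        << "2 2 0 1.0 1.0\n"
        << "3 3 0 0.0 1.0\n";
    // interior elements: "<ElementType> <numEle>" then "Id Tag node-list"
    fly << "Tri3 2\n"
        << "0 0 0 1 2\n"
        << "1 0 0 2 3\n";
    // face, contact and point element sections (left empty here)
    fly << "Line2 0\n"
        << "Line2_Contact 0\n"
        << "Point1 0\n";
    // tag map: "Tags" header followed by "<name> <tag_key>" pairs
    fly << "Tags\n"
        << "domain 0\n";
    return 0;
}

A file written this way could then be passed back to FinleyDomain::read(), which distributes the node and element chunks across the MPI ranks and finishes with resolveNodeIds() and prepare() as shown above.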

Properties

svn:eol-style: native
svn:keywords: Author Date Id Revision
svn:mergeinfo:
    /branches/4.0fordebian/finley/src/Mesh_read.cpp:5567-5588
    /branches/diaplayground/finley/src/Mesh_read.cpp:4940-5147
    /branches/lapack2681/finley/src/Mesh_read.cpp:2682-2741
    /branches/pasowrap/finley/src/Mesh_read.cpp:3661-3674
    /branches/py3_attempt2/finley/src/Mesh_read.cpp:3871-3891
    /branches/restext/finley/src/Mesh_read.cpp:2610-2624
    /branches/ripleygmg_from_3668/finley/src/Mesh_read.cpp:3669-3791
    /branches/stage3.0/finley/src/Mesh_read.cpp:2569-2590
    /branches/symbolic_from_3470/finley/src/Mesh_read.cpp:3471-3974
    /branches/trilinos_from_5897/finley/src/Mesh_read.cpp:5898-6118
    /release/3.0/finley/src/Mesh_read.cpp:2591-2601
    /release/4.0/finley/src/Mesh_read.cpp:5380-5406
    /trunk/finley/src/Mesh_read.cpp:4257-4344
