
Contents of /branches/trilinos_from_5897/dudley/src/Mesh_read.cpp



Revision 6079
Mon Mar 21 12:22:38 2016 UTC by caltinay
File size: 14445 byte(s)
Big commit - making dudley much more like finley to make it more
manageable. Fixed quite a few issues that had already been fixed in finley.
Disposed of all ReducedNode/ReducedDOF entities that dudley never supported.
Compiles and passes tests.


/*****************************************************************************
*
* Copyright (c) 2003-2016 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

#include "Mesh.h"

using escript::IOError;

namespace {

using namespace dudley;

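// Reads one element block from the (already open) mesh file and returns a
// newly allocated ElementFile. A block starts with a header line
// "<elementTypeName> <numEle>" followed by one line per element of the form
// "<id> <tag> <node_0> ... <node_{n-1}>", where n is the number of nodes of
// the element type. Rank 0 reads the block in chunks and sends one chunk to
// each other rank (message tag 81722); the last chunk stays on rank 0.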
ElementFile* readElementFile(FILE* fileHandle, escript::JMPI mpiInfo)
{
    dim_t numEle;
    ElementTypeId typeID = Dudley_NoRef;
    char elementType[1024];
    int scan_ret;

    // Read the element typeID and number of elements
    if (mpiInfo->rank == 0) {
        scan_ret = fscanf(fileHandle, "%s %d\n", elementType, &numEle);
        if (scan_ret == EOF)
            throw IOError("Mesh::read: Scan error while reading file");
        typeID = eltTypeFromString(elementType);
    }
#ifdef ESYS_MPI
    if (mpiInfo->size > 1) {
        dim_t temp1[2];
        temp1[0] = (dim_t)typeID;
        temp1[1] = numEle;
        int mpiError = MPI_Bcast(temp1, 2, MPI_DIM_T, 0, mpiInfo->comm);
        if (mpiError != MPI_SUCCESS) {
            throw DudleyException("Mesh::read: broadcast of element typeID failed");
        }
        typeID = static_cast<ElementTypeId>(temp1[0]);
        numEle = temp1[1];
    }
#endif
    if (typeID == Dudley_NoRef) {
        std::stringstream ss;
        ss << "Mesh::read: Unidentified element type " << elementType;
        throw IOError(ss.str());
    }

    // Allocate the ElementFile
    ElementFile* out = new ElementFile(typeID, mpiInfo);
    const int numNodes = out->numNodes;

    /********************** Read the element data **************************/
    dim_t chunkSize = numEle / mpiInfo->size + 1;
    dim_t totalEle = 0;
    dim_t chunkEle = 0;
    int nextCPU = 1;
    /// Store Id + Tag + node list (+ one int at end for chunkEle)
    index_t* tempInts = new index_t[chunkSize * (2 + numNodes) + 1];
    // Elements are specified as a list of integers...only need one message
    // instead of two as with the nodes
    if (mpiInfo->rank == 0) { // Master
        for (;;) { // Infinite loop
            #pragma omp parallel for
            for (index_t i0 = 0; i0 < chunkSize * (2 + numNodes) + 1; i0++)
                tempInts[i0] = -1;
            chunkEle = 0;
            for (index_t i0 = 0; i0 < chunkSize; i0++) {
                if (totalEle >= numEle)
                    break; // End inner loop
                scan_ret = fscanf(fileHandle, "%d %d",
                                  &tempInts[i0 * (2 + numNodes) + 0],
                                  &tempInts[i0 * (2 + numNodes) + 1]);
                if (scan_ret == EOF)
                    throw IOError("Mesh::read: Scan error while reading file");
                for (int i1 = 0; i1 < numNodes; i1++) {
                    scan_ret = fscanf(fileHandle, " %d",
                                      &tempInts[i0 * (2 + numNodes) + 2 + i1]);
                    if (scan_ret == EOF)
                        throw IOError("Mesh::read: Scan error while reading file");
                }
                scan_ret = fscanf(fileHandle, "\n");
                if (scan_ret == EOF)
                    throw IOError("Mesh::read: Scan error while reading file");
                totalEle++;
                chunkEle++;
            }
#ifdef ESYS_MPI
            // Eventually we'll send chunk of elements to each CPU except 0
            // itself, here goes one of them
            if (nextCPU < mpiInfo->size) {
                tempInts[chunkSize * (2 + numNodes)] = chunkEle;
                MPI_Send(tempInts, chunkSize * (2 + numNodes) + 1, MPI_DIM_T,
                         nextCPU, 81722, mpiInfo->comm);
            }
#endif
            nextCPU++;
            // Infinite loop ends when I've read a chunk for each of the worker
            // nodes plus one more chunk for the master
            if (nextCPU > mpiInfo->size)
                break; // End infinite loop
        } // Infinite loop
    } // end master
    else { // Worker
#ifdef ESYS_MPI
        // Each worker receives one message
        MPI_Status status;
        MPI_Recv(tempInts, chunkSize * (2 + numNodes) + 1, MPI_DIM_T, 0,
                 81722, mpiInfo->comm, &status);
        chunkEle = tempInts[chunkSize * (2 + numNodes)];
#endif
    } // Worker

    out->allocTable(chunkEle);

    // Copy Element data from tempInts to mesh
    out->minColor = 0;
    out->maxColor = chunkEle - 1;
    #pragma omp parallel for
    for (index_t i0 = 0; i0 < chunkEle; i0++) {
        out->Id[i0] = tempInts[i0 * (2 + numNodes) + 0];
        out->Tag[i0] = tempInts[i0 * (2 + numNodes) + 1];
        out->Owner[i0] = mpiInfo->rank;
        out->Color[i0] = i0;
        for (int i1 = 0; i1 < numNodes; i1++) {
            out->Nodes[INDEX2(i1, i0, numNodes)] =
                tempInts[i0 * (2 + numNodes) + 2 + i1];
        }
    }
    delete[] tempInts;
    return out;
}

} // anonymous

namespace dudley {

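// Reads a Mesh from a text mesh file. The layout expected by the code below
// is:
//   1. a header line holding the mesh name,
//   2. a line starting with the spatial dimension digit followed by a token
//      and the node count (e.g. "2D-Nodes <numNodes>"),
//   3. one line per node: "<id> <globalDOF> <tag> <x> [<y> [<z>]]",
//   4. three element blocks (interior elements, face elements, points) in the
//      format described above readElementFile(),
//   5. an optional tag section: a keyword line followed by one
//      "<tagName> <tagKey>" pair per line.
// Only rank 0 reads the file; node and element data are distributed to the
// other ranks in chunks.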
Mesh* Mesh::read(escript::JMPI mpiInfo, const std::string& filename,
                 bool optimize)
{
    dim_t numNodes;
    int numDim = 0;
    char name[1024], frm[20];
    FILE *fileHandle = NULL;
    int scan_ret;

    if (mpiInfo->rank == 0) {
        // open file
        fileHandle = fopen(filename.c_str(), "r");
        if (!fileHandle) {
            std::stringstream ss;
            ss << "Mesh::read: Opening file " << filename
               << " for reading failed.";
            throw DudleyException(ss.str());
        }

        // read header
        sprintf(frm, "%%%d[^\n]", 1023);
        scan_ret = fscanf(fileHandle, frm, name);
        if (scan_ret == EOF)
            throw IOError("Mesh::read: Scan error while reading file");

        // get the number of nodes
        scan_ret = fscanf(fileHandle, "%1d%*s %d\n", &numDim, &numNodes);
        if (scan_ret == EOF)
            throw IOError("Mesh::read: Scan error while reading file");
    }

#ifdef ESYS_MPI
    // MPI Broadcast numDim, numNodes, name if there are multiple MPI procs
    if (mpiInfo->size > 1) {
        dim_t temp1[3];
        if (mpiInfo->rank == 0) {
            temp1[0] = numDim;
            temp1[1] = numNodes;
            temp1[2] = strlen(name) + 1;
        } else {
            temp1[0] = 0;
            temp1[1] = 0;
            temp1[2] = 1;
        }
        MPI_Bcast(temp1, 3, MPI_DIM_T, 0, mpiInfo->comm);
        numDim = temp1[0];
        numNodes = temp1[1];
        MPI_Bcast(name, temp1[2], MPI_CHAR, 0, mpiInfo->comm);
    }
#endif

    // allocate mesh
    Mesh* mesh = new Mesh(name, numDim, mpiInfo);

    // Each CPU will get at most chunkSize nodes so the message has to be
    // sufficiently large
    dim_t chunkSize = numNodes / mpiInfo->size + 1;
    dim_t totalNodes = 0;
    dim_t chunkNodes = 0;
    int nextCPU = 1;
    // Stores the integer message data
    index_t* tempInts = new index_t[chunkSize * 3 + 1];
    // Stores the double message data
    double* tempCoords = new double[chunkSize * numDim];

    // Read chunkSize nodes, send it in a chunk to worker CPU which copies
    // chunk into its local mesh. It doesn't matter that a CPU has the wrong
    // nodes for its elements, this is sorted out later. First chunk sent to
    // CPU 1, second to CPU 2, ..., last chunk stays on CPU 0 (the master).
    // The three columns of integers (Id, gDOF, Tag) are gathered into a single
    // array tempInts and sent together in a single MPI message.
    if (mpiInfo->rank == 0) { // Master
        for (;;) { // Infinite loop
            #pragma omp parallel for
            for (index_t i0 = 0; i0 < chunkSize * 3 + 1; i0++)
                tempInts[i0] = -1;

            #pragma omp parallel for
            for (index_t i0 = 0; i0 < chunkSize * numDim; i0++)
                tempCoords[i0] = -1.0;

            chunkNodes = 0;
            for (index_t i1 = 0; i1 < chunkSize; i1++) {
                if (totalNodes >= numNodes)
                    break; // End of inner loop
                if (1 == numDim) {
                    scan_ret = fscanf(fileHandle, "%d %d %d %le\n",
                                      &tempInts[0 + i1],
                                      &tempInts[chunkSize + i1],
                                      &tempInts[chunkSize * 2 + i1],
                                      &tempCoords[i1 * numDim + 0]);
                } else if (2 == numDim) {
                    scan_ret = fscanf(fileHandle, "%d %d %d %le %le\n",
                                      &tempInts[0 + i1],
                                      &tempInts[chunkSize + i1],
                                      &tempInts[chunkSize * 2 + i1],
                                      &tempCoords[i1 * numDim + 0],
                                      &tempCoords[i1 * numDim + 1]);
                } else if (3 == numDim) {
                    scan_ret = fscanf(fileHandle, "%d %d %d %le %le %le\n",
                                      &tempInts[0 + i1],
                                      &tempInts[chunkSize + i1],
                                      &tempInts[chunkSize * 2 + i1],
                                      &tempCoords[i1 * numDim + 0],
                                      &tempCoords[i1 * numDim + 1],
                                      &tempCoords[i1 * numDim + 2]);
                }
                if (scan_ret == EOF)
                    throw IOError("Mesh::read: Scan error while reading file");
                totalNodes++; // When do we quit the infinite loop?
                chunkNodes++; // How many nodes do we actually have in this chunk? It may be smaller than chunkSize.
            }
            if (chunkNodes > chunkSize) {
                throw DudleyException("Mesh::read: error reading chunks of mesh, data too large for message size");
            }
#ifdef ESYS_MPI
            // Eventually we'll send chunkSize nodes to each CPU numbered
            // 1 ... mpiInfo->size-1, here goes one of them
            if (nextCPU < mpiInfo->size) {
                // The message has one more int to send chunkNodes
                tempInts[chunkSize * 3] = chunkNodes;
                MPI_Send(tempInts, chunkSize * 3 + 1, MPI_DIM_T, nextCPU, 81720, mpiInfo->comm);
                MPI_Send(tempCoords, chunkSize * numDim, MPI_DOUBLE, nextCPU, 81721, mpiInfo->comm);
            }
#endif
            nextCPU++;
            // Infinite loop ends when I've read a chunk for each of the worker
            // nodes plus one more chunk for the master
            if (nextCPU > mpiInfo->size)
                break; // End infinite loop
        } // Infinite loop
    } // End master
    else { // Worker
#ifdef ESYS_MPI
        // Each worker receives two messages
        MPI_Status status;
        MPI_Recv(tempInts, chunkSize * 3 + 1, MPI_DIM_T, 0, 81720, mpiInfo->comm, &status);
        MPI_Recv(tempCoords, chunkSize * numDim, MPI_DOUBLE, 0, 81721, mpiInfo->comm, &status);
        // How many nodes are in this worker's chunk?
        chunkNodes = tempInts[chunkSize * 3];
#endif
    } // Worker

    // Copy node data from tempMem to mesh
    mesh->Nodes->allocTable(chunkNodes);

    #pragma omp parallel for
    for (index_t i0 = 0; i0 < chunkNodes; i0++) {
        mesh->Nodes->Id[i0] = tempInts[0 + i0];
        mesh->Nodes->globalDegreesOfFreedom[i0] = tempInts[chunkSize + i0];
        mesh->Nodes->Tag[i0] = tempInts[chunkSize * 2 + i0];
        for (int i1 = 0; i1 < numDim; i1++) {
            mesh->Nodes->Coordinates[INDEX2(i1, i0, numDim)] = tempCoords[i0 * numDim + i1];
        }
    }
    delete[] tempInts;
    delete[] tempCoords;

    /*************************** read elements ******************************/
    mesh->Elements = readElementFile(fileHandle, mpiInfo);

    /************************ read face elements ****************************/
    mesh->FaceElements = readElementFile(fileHandle, mpiInfo);

    /************************ read nodal elements ***************************/
    mesh->Points = readElementFile(fileHandle, mpiInfo);

    /************************ get the name tags ****************************/
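    // The tag section consists of a keyword line followed by one
    // "<tagName> <tagKey>" pair per line. Rank 0 slurps the remainder of the
    // file into a buffer which is then broadcast so that every rank can build
    // the same tag map.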
    char *remainder = NULL, *ptr;
    size_t len = 0;
    int tag_key;
    if (mpiInfo->rank == 0) { // Master
        // Read the word 'Tag'
        if (!feof(fileHandle)) {
            scan_ret = fscanf(fileHandle, "%s\n", name);
            if (scan_ret == EOF)
                throw IOError("Mesh::read: Scan error while reading file");
        }
        // Read rest of file in one chunk, after using seek to find length
        long cur_pos = ftell(fileHandle);
        fseek(fileHandle, 0L, SEEK_END);
        long end_pos = ftell(fileHandle);
        fseek(fileHandle, (long)cur_pos, SEEK_SET);
        remainder = new char[end_pos - cur_pos + 1];
        remainder[0] = 0; // ensure a valid (empty) string even if nothing is read
        if (!feof(fileHandle)) {
            // fread() returns the number of items read and never EOF, so use
            // ferror() to detect read errors and terminate the string at the
            // length actually read
            scan_ret = fread(remainder, sizeof(char),
                             (size_t)(end_pos - cur_pos), fileHandle);
            if (ferror(fileHandle))
                throw IOError("Mesh::read: Scan error while reading file");
            remainder[scan_ret] = 0;
        }
        len = strlen(remainder);
        while (len > 1 && isspace(remainder[--len])) {
            remainder[len] = 0;
        }
        len = strlen(remainder);
    } // Master

#ifdef ESYS_MPI
    int len_i = static_cast<int>(len);
    MPI_Bcast(&len_i, 1, MPI_INT, 0, mpiInfo->comm);
    len = static_cast<size_t>(len_i);
    if (mpiInfo->rank != 0) {
        remainder = new char[len + 1];
        remainder[0] = 0;
    }
    if (MPI_Bcast(remainder, len+1, MPI_CHAR, 0, mpiInfo->comm) != MPI_SUCCESS)
        throw DudleyException("Mesh::read: broadcast of remainder failed");
#endif

    if (remainder[0]) {
        ptr = remainder;
        do {
            sscanf(ptr, "%s %d\n", name, &tag_key);
            if (*name)
                mesh->addTagMap(name, tag_key);
            ptr++;
        } while (NULL != (ptr = strchr(ptr, '\n')) && *ptr);
    }
    delete[] remainder;

    // close file
    if (mpiInfo->rank == 0)
        fclose(fileHandle);

    mesh->resolveNodeIds();
    mesh->prepare(optimize);
    return mesh;
}

} // namespace dudley

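For context, a minimal sketch of how dudley::Mesh::read might be invoked; it is not part of the file above. It assumes an MPI-enabled build, that the headers are reachable under the paths shown, that escript::makeInfo() is the JMPI factory available in this revision, and an input file named "mesh.fly" - all of these are assumptions rather than facts from the listing.

    // Usage sketch (assumed headers, assumed escript::makeInfo() factory,
    // hypothetical input file name).
    #include <mpi.h>
    #include <dudley/Mesh.h>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        {
            // Wrap the communicator in the JMPI handle expected by Mesh::read.
            escript::JMPI mpiInfo = escript::makeInfo(MPI_COMM_WORLD);
            // The third argument maps to the 'optimize' parameter of
            // Mesh::read above.
            dudley::Mesh* mesh = dudley::Mesh::read(mpiInfo, "mesh.fly", true);
            // ... use the mesh ...
            delete mesh;
        }
        MPI_Finalize();
        return 0;
    }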
