
Contents of /trunk/escriptcore/src/MPIDataReducer.cpp



Revision 5492
Tue Feb 24 09:33:32 2015 UTC by jfenwick
File size: 9208 bytes
Log message: fixed linking problem
/*****************************************************************************
*
* Copyright (c) 2014-2015 by University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

#define ESNEEDPYTHON
#include "esysUtils/first.h"

#include <sstream>
#include <limits>
#include <boost/python/extract.hpp>
#include <boost/scoped_array.hpp>

#include "MPIDataReducer.h"
#include "SplitWorldException.h"

using namespace boost::python;
using namespace escript;

namespace escript
{
Reducer_ptr makeDataReducer(std::string type)
{
    MPI_Op op;
    if (type=="SUM")
    {
        op=MPI_SUM;
    }
    else
    {
        throw SplitWorldException("Unsupported operation for makeDataReducer.");
    }
    MPIDataReducer* m=new MPIDataReducer(op);
    return Reducer_ptr(m);
}

}

namespace
{

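// Combine two local Data objects elementwise according to the reduction operation;
// only MPI_SUM is currently handled.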
void combineData(Data& d1, const Data& d2, MPI_Op op)
{
    if (op==MPI_SUM)
    {
        d1+=d2;
    }
}

}

MPIDataReducer::MPIDataReducer(MPI_Op op)
    : reduceop(op)
{
    valueadded=false;
    if (op==MPI_SUM)
    {
        // deliberately left blank
    }
    else
    {
        throw SplitWorldException("Unsupported MPI_Op");
    }
}

void MPIDataReducer::setDomain(escript::Domain_ptr d)
{
    dom=d;
}

std::string MPIDataReducer::description()
{
    std::string op="SUM";
    return "Reducer("+op+") for Data objects";
}

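// A value is compatible if it is a Data object and, once a domain has been set,
// it lives on that domain.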
bool MPIDataReducer::valueCompatible(boost::python::object v)
{
    extract<Data&> ex(v);
    if (!ex.check())
    {
        return false;
    }
    if (dom.get()!=0)
    {
        const Data& d=ex();
        if (d.getDomain().get()!=dom.get())
        {
            return false;    // the domains don't match
        }
    }
    return true;
}

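// Fold a value contributed on this process into the running local reduction.
// The first contribution simply becomes the current value (and fixes the domain);
// later contributions must share its FunctionSpace and are combined with reduceop.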
bool MPIDataReducer::reduceLocalValue(boost::python::object v, std::string& errstring)
{
    extract<Data&> ex(v);
    if (!ex.check())
    {
        errstring="reduceLocalValue: expected Data object. Got something else.";
        return false;
    }
    Data& d=ex();
    if ((d.getDomain()!=dom) && (dom.get()!=0))
    {
        errstring="reduceLocalValue: Got a Data object, but it was not using the SubWorld's domain.";
        return false;
    }
    d.expand();    // because I don't want to mess about with types of Data
    if (!valueadded)    // first value so answer becomes this one
    {
        value=d;
        dom=d.getDomain();
    }
    else
    {
        if (d.getFunctionSpace()!=value.getFunctionSpace())
        {
            errstring="reduceLocalValue: FunctionSpaces for Data objects being combined must match.";
            return false;
        }
        combineData(value, d, reduceop);
    }
    valueadded=true;    // record that we now hold a value to reduce against
    return true;
}

void MPIDataReducer::reset()
{
    valueadded=false;
    value=Data();
}

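// Gather each rank's compatibility vector (see getCompatibilityInfo) and compare
// neighbouring ranks pairwise; any mismatch means the values cannot be reduced together.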
bool MPIDataReducer::checkRemoteCompatibility(esysUtils::JMPI& mpi_info, std::string& errstring)
{
#ifdef ESYS_MPI
    // since they can't add it unless it is using the proper domain, we need to check

    std::vector<unsigned> compat(6);
    getCompatibilityInfo(compat);

    // still need to incorporate domain version into this
    // or are domains not mutable in any way that matters?
    int* rbuff=new int[mpi_info->size*compat.size()];
    boost::scoped_array<int> dummy(rbuff);    // to ensure cleanup
    for (int i=0;i<mpi_info->size;++i)
    {
        rbuff[i]=0;    // since this won't match any valid value we can use it as a failure check
    }
    if (MPI_Allgather(&compat[0], compat.size(), MPI_UNSIGNED, rbuff,
            compat.size(), MPI_UNSIGNED, mpi_info->comm)!=MPI_SUCCESS)
    {
        errstring="MPI failure in checkRemoteCompatibility.";
        return false;
    }
    for (int i=0;i<mpi_info->size-1;++i)
    {
        for (int j=0;j<compat.size();++j)
        {
            if (rbuff[i*compat.size()+j]!=rbuff[(i+1)*compat.size()+j])
            {
                std::ostringstream oss;
                oss << "Incompatible value found for SubWorld " << i+1 << '.';
                errstring=oss.str();
                return false;
            }
        }
    }
    return true;
#else
    return true;
#endif
}

// By the time this function is called, we know that all the values
// are compatible
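// Note that MPI_Allreduce is collective, so all participating ranks must call this
// together; afterwards every rank holds the combined value.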
bool MPIDataReducer::reduceRemoteValues(esysUtils::JMPI& mpi_info, bool active)
{
    if (!active)
    {
        return false;    // shutting down this option until I implement it
    }
#ifdef ESYS_MPI
    DataTypes::ValueType& vr=value.getExpandedVectorReference();
    Data result(0, value.getDataPointShape(), value.getFunctionSpace(), true);
    DataTypes::ValueType& rr=result.getExpandedVectorReference();    // receive buffer must be distinct from the send buffer
    if (MPI_Allreduce(&(vr[0]), &(rr[0]), vr.size(), MPI_DOUBLE, reduceop, mpi_info->comm)!=MPI_SUCCESS)
    {
        return false;
    }
    value=result;    // keep the combined result as this variable's value
    return true;
#else
    return true;
#endif
}

// populate a vector of unsigned ints with enough information to ensure two values are compatible
// or to construct a container for incoming data
// Format for this:
// [0] Type of Data: {0 : error, 1: DataEmpty, 10: constant, 11:tagged, 12:expanded}
// [1] Functionspace type code
// [2] Only used for tagged --- gives the number of tags (which exist in the data object)
// [3..6] Components of the shape
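// Unused shape slots are left as zero, which lets the receiver recover the rank of the shape.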
void MPIDataReducer::getCompatibilityInfo(std::vector<unsigned>& params)
{
    params.resize(7);
    for (int i=0;i<7;++i)
    {
        params[i]=0;
    }
    if (value.isConstant())
    {
        params[0]=10;
    }
    else if (value.isTagged())
    {
        params[0]=11;
    }
    else if (value.isExpanded())
    {
        params[0]=12;
    }
    else    // This could be DataEmpty or some other weirdness but we won't allow that
    {
        params[0]=0;    // invalid type to send
    }
    params[1]=value.getFunctionSpace().getTypeCode();
    params[2]=static_cast<unsigned>(value.getNumberOfTaggedValues());
    const DataTypes::ShapeType& s=value.getDataPointShape();
    for (int i=0;i<s.size();++i)
    {
        params[3+i]=s[i];
    }
}

// Get a value for this variable from another process
// This is not a reduction and will replace any existing value
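// The matching sendTo() ships the compatibility vector first and the raw values second;
// at this stage only the header is consumed here, so the new value stays zero-initialised.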
bool MPIDataReducer::recvFrom(Esys_MPI_rank localid, Esys_MPI_rank source, esysUtils::JMPI& mpiinfo)
{
#ifdef ESYS_MPI
    // first we need to find out what we are expecting
    unsigned params[7];
    MPI_Status stat;
    if (MPI_Recv(params, 7, MPI_UNSIGNED, source, PARAMTAG, mpiinfo->comm, &stat)!=MPI_SUCCESS)
    {
        return false;
    }
    if (params[0]<10)    // the sender somehow tried to send something invalid
    {
        return false;
    }
    // now we put the shape object together
    escript::DataTypes::ShapeType s;
    for (int i=0;i<4;++i)
    {
        if (params[3+i]>0)
        {
            s.push_back(params[3+i]);
        }
        else
        {
            break;
        }
    }
    // Now we need the FunctionSpace
    FunctionSpace fs=FunctionSpace(dom, static_cast<int>(params[1]));
    value=Data(0, s, fs, params[0]==12);
    if (params[0]==11)    // The Data is tagged so we need to work out what tags we need
    {
        // TODO: Need to ship the tags and names over but for now just make sure there
        // are the same number of tags
        value.tag();

        DataVector dv(DataTypes::noValues(s), 0, 1);
        for (unsigned i=0;i<params[2];++i)
        {
            value.setTaggedValueFromCPP(static_cast<int>(i)+1, s, dv, 0);
        }
        return false;    // because I don't trust this yet
    }
#endif
    return true;
}

// Send the value of this variable to another process
// This is not a reduction and will replace any existing value on the receiving process
bool MPIDataReducer::sendTo(Esys_MPI_rank localid, Esys_MPI_rank target, esysUtils::JMPI& mpiinfo)
{
#ifdef ESYS_MPI
    // first step is to let the other world know what sort of thing it needs to make
    if (value.isLazy())
    {
        value.resolve();
    }
    std::vector<unsigned> params;
    getCompatibilityInfo(params);
    if (MPI_Send(&params[0], 7, MPI_UNSIGNED, target, PARAMTAG, mpiinfo->comm)!=MPI_SUCCESS)
    {
        return false;
    }
    // now we have informed the other end of what happened
    // are we done or is there actually data to send
    if (params[0]<10)
    {
        return false;
    }
    // at this point, we know there is data to send
    const DataAbstract::ValueType::value_type* vect=value.getDataRO();
    // now the receiver knows how much data it should receive
    // need to make sure that we aren't trying to send data with no local samples
    if (vect!=0)
    {
        // MPI v3 has this first param as a const void* (as it should be)
        // Version on my machine expects void*
        if (MPI_Send(const_cast<DataAbstract::ValueType::value_type*>(vect), value.getLength(), MPI_DOUBLE, target, PARAMTAG, mpiinfo->comm)!=MPI_SUCCESS)
        {
            return false;
        }
    }
#endif
    return true;
}

boost::python::object MPIDataReducer::getPyObj()
{
    throw SplitWorldException("getPyObj Not implemented yet.");
}

// send from proc 0 in the communicator to all others
bool MPIDataReducer::groupSend(MPI_Comm& com)
{
    throw SplitWorldException("groupSend Not implemented yet.");
}

bool MPIDataReducer::groupReduce(MPI_Comm& com, char mystate)
{
    throw SplitWorldException("groupReduce Not implemented yet.");
}
