@@ lines 236-253 @@
 
   Paso_MPIInfo *mpi_info = Paso_MPIInfo_alloc( MPI_COMM_WORLD );
   dim_t numNodes, numDim, numEle, i0, i1;
-  index_t tag_key;
   Finley_Mesh *mesh_p=NULL;
   char name[LenString_MAX],element_type[LenString_MAX],frm[20];
   char error_msg[LenErrorMsg_MAX];
   double time0=Finley_timer();
   FILE *fileHandle_p = NULL;
-  ElementTypeId typeID, faceTypeID, contactTypeID, pointTypeID;
+  ElementTypeId typeID;
 
+#if 0 /* comment out the rest of the un-implemented crap for now */
+  /* See below */
+  ElementTypeId faceTypeID, contactTypeID, pointTypeID;
+  index_t tag_key;
+#endif
 
   Finley_resetError();
 
@@ lines 294-300 @@
   /* allocate mesh */
   mesh_p = Finley_Mesh_alloc(name,numDim,order,reduced_order,mpi_info);
   if (Finley_noError()) {
-    int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0, chunkEle=0, nextCPU=1, mpi_error;
+    int chunkSize = numNodes / mpi_info->size + 1, totalNodes=0, chunkNodes=0, chunkEle=0, nextCPU=1;
     int *tempInts = TMPMEMALLOC(numNodes*3+1, index_t);
     double *tempCoords = TMPMEMALLOC(numNodes*numDim, double);
 
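A note on the chunk arithmetic: numNodes / mpi_info->size + 1 rounds the chunk length up, so mpi_info->size chunks of that length always cover the whole node range and only the last chunk comes up short (the surrounding code sends chunks to CPUs 1 .. size-1 and presumably keeps the remaining one on rank 0). A standalone sketch of the carving with made-up sizes, illustrative only:

    #include <stdio.h>

    /* Illustrative only: how chunkSize = numNodes / size + 1 carves the node range. */
    int main(void) {
        int numNodes = 10, size = 4;                    /* e.g. 10 nodes over 4 CPUs */
        int chunkSize = numNodes / size + 1;            /* 10/4 + 1 = 3              */
        for (int cpu = 0; cpu < size; cpu++) {
            int first = cpu * chunkSize;
            int count = numNodes - first;
            if (count < 0) count = 0;
            if (count > chunkSize) count = chunkSize;
            printf("chunk %d: %d nodes\n", cpu, count); /* prints 3, 3, 3, 1         */
        }
        return 0;
    }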
@@ lines 331-338 @@
       /* Eventually we'll send chunk of nodes to each CPU numbered 1 ... mpi_info->size-1, here goes one of them */
       if (nextCPU < mpi_info->size) {
 #ifdef PASO_MPI
+        int mpi_error;
 
         tempInts[numNodes*3] = chunkNodes;
         /* ksteube The size of this message can and should be brought down to chunkNodes*3+1, must re-org tempInts */
         mpi_error = MPI_Send(tempInts, numNodes*3+1, MPI_INT, nextCPU, 81720, mpi_info->comm);
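Only the sending half of the node exchange is visible above: rank 0 stores the valid node count in the last slot of tempInts and ships the whole fixed-size buffer on tag 81720. Assuming the worker ranks mirror this with an MPI_Recv on the same tag and read chunkNodes back out of that slot (the receive side is not part of this excerpt), the message shape is roughly as in the standalone sketch below; only the "count rides in the last slot" idea is taken from the file, the rest is illustrative.

    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative sketch of the fixed-size node message with the count in its last slot. */
    int main(int argc, char **argv) {
        int rank, size, numNodes = 8;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        int *tempInts = calloc(numNodes*3+1, sizeof(int));
        if (rank == 0) {
            tempInts[numNodes*3] = 5;                /* pretend only 5 of the 8 slots are filled */
            for (int cpu = 1; cpu < size; cpu++)     /* the file sends each CPU its own chunk;   */
                MPI_Send(tempInts, numNodes*3+1,     /* one buffer is reused here for brevity    */
                         MPI_INT, cpu, 81720, MPI_COMM_WORLD);
        } else {
            MPI_Status status;
            MPI_Recv(tempInts, numNodes*3+1, MPI_INT, 0, 81720, MPI_COMM_WORLD, &status);
            printf("rank %d: chunkNodes=%d\n", rank, tempInts[numNodes*3]);
        }
        free(tempInts);
        MPI_Finalize();
        return 0;
    }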
@@ lines 434-440 @@
 
   if (Finley_noError()) {
     int *tempInts = TMPMEMALLOC(numEle*(2+numNodes)+1, index_t); /* Store Id + Tag + node list (+ one int at end for chunkEle) */
-    int chunkSize = numEle / mpi_info->size, totalEle=0, nextCPU=1, mpi_error;
+    int chunkSize = numEle / mpi_info->size, totalEle=0, nextCPU=1;
     if (numEle % mpi_info->size != 0) chunkSize++; /* Remainder from numEle / mpi_info->size will be spread out one-per-CPU */
     if (mpi_info->rank == 0) { /* Master */
       for (;;) { /* Infinite loop */
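The tempInts layout in the element pass is a flat array of fixed records: each element occupies 2+numNodes consecutive ints (Id, Tag, then its node list; numNodes here is evidently the per-element node count), and the single int at offset numEle*(2+numNodes) carries chunkEle. A standalone sketch of that layout with dummy values, illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: the flat element-record layout, 2+numNodes ints per element. */
    int main(void) {
        int numEle = 2, numNodes = 3;                      /* two 3-node elements, dummy sizes */
        int *tempInts = calloc(numEle*(2+numNodes)+1, sizeof(int));
        for (int i = 0; i < numEle; i++) {
            int *rec = &tempInts[i*(2+numNodes)];
            rec[0] = 100 + i;                              /* element Id  (dummy)              */
            rec[1] = 1;                                    /* element Tag (dummy)              */
            for (int j = 0; j < numNodes; j++)
                rec[2+j] = i*numNodes + j;                 /* node list   (dummy)              */
        }
        tempInts[numEle*(2+numNodes)] = numEle;            /* chunkEle rides in the last slot  */
        printf("record 1 at offset %d, count slot at offset %d\n",
               1*(2+numNodes), numEle*(2+numNodes));
        free(tempInts);
        return 0;
    }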
@@ lines 451-458 @@
         /* Eventually we'll send chunk of nodes to each CPU except 0 itself, here goes one of them */
         if (nextCPU < mpi_info->size) {
 #ifdef PASO_MPI
+          int mpi_error;
 
           tempInts[numEle*(2+numNodes)] = chunkEle;
           printf("ksteube CPU=%d/%d send to %d\n", mpi_info->rank, mpi_info->size, nextCPU);
           mpi_error = MPI_Send(tempInts, numEle*(2+numNodes)+1, MPI_INT, nextCPU, 81722, mpi_info->comm);
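The recurring change across these hunks is that mpi_error leaves the shared declaration lists and is declared inside the #ifdef PASO_MPI block that actually uses it, so a serial build (without PASO_MPI) no longer carries an unused variable. The two passes also use distinct tags, 81720 for the node message and 81722 for the element message, so the matching receives cannot be confused. Schematically (buf, len and tag are placeholders, not identifiers from the file):

    if (nextCPU < mpi_info->size) {
    #ifdef PASO_MPI
        int mpi_error;                 /* exists only when MPI support is compiled in */
        mpi_error = MPI_Send(buf, len, MPI_INT, nextCPU, tag, mpi_info->comm);
        /* the hunks shown store the return code; any check happens outside this excerpt */
    #endif
    }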