/[escript]/branches/doubleplusgood/esysUtils/src/Esys_MPI.cpp
Revision 4336
Thu Mar 21 08:25:18 2013 UTC by jfenwick
File size: 4238 bytes
all MEM macros gone except reallocs

/*****************************************************************************
*
* Copyright (c) 2003-2013 by University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development since 2012 by School of Earth Sciences
*
*****************************************************************************/


#include <stdlib.h>
#include <stdio.h>
#include <string.h>


#include "Esys_MPI.h"
#include "index.h"
#include "mem.h"
#include "error.h"


/* allocate memory for an mpi_comm, and find the communicator details */
Esys_MPIInfo* Esys_MPIInfo_alloc( MPI_Comm comm )
{
#ifdef ESYS_MPI
    int error;
#endif

    Esys_MPIInfo *out=NULL;

    out = new Esys_MPIInfo;

    out->reference_counter = 0;
    out->msg_tag_counter = 0;
#ifdef ESYS_MPI
    error = MPI_Comm_rank( comm, &out->rank )==MPI_SUCCESS && MPI_Comm_size( comm, &out->size )==MPI_SUCCESS;
    if( !error ) {
        Esys_setError( ESYS_MPI_ERROR, "Esys_MPIInfo_alloc : error finding comm rank/size" );
    }

    out->comm = comm;
#else
    out->rank=0;
    out->size=1;
    out->comm=-1;
#endif
    out->reference_counter++;

    return out;
}

/* free memory for an mpi_comm */
void Esys_MPIInfo_free( Esys_MPIInfo *in )
{
    if( in!=NULL) {
        if (!(--in->reference_counter) ) delete in;
    }
}

Esys_MPIInfo *Esys_MPIInfo_getReference( Esys_MPIInfo* in )
{
    if (in!=NULL)
        ++(in->reference_counter);

    return in;
}
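
/* Usage sketch (illustrative only, not part of the original file; assumes an
   ESYS_MPI build with MPI already initialised):

     Esys_MPIInfo* info = Esys_MPIInfo_alloc(MPI_COMM_WORLD); // reference_counter == 1
     Esys_MPIInfo* ref  = Esys_MPIInfo_getReference(info);    // reference_counter == 2
     Esys_MPIInfo_free(ref);                                  // reference_counter == 1
     Esys_MPIInfo_free(info);                                 // counter reaches 0, object deleted
*/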
/* n = number of CPUs, k is a CPU number that may be out of range or negative. Returns a CPU number in 0..n-1. */
index_t Esys_MPIInfo_mod(index_t n, index_t k)
{
    index_t q, out=0;
    if (n>1) {
        q=k/n;
        if (k>0) {
            out=k-n*q;
        } else if (k<0) {
            out=k-n*(q-1);
        }
    }
    return out;
}
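
/* Worked example (illustrative, not part of the original file): with n=4 CPUs,
   Esys_MPIInfo_mod(4, 5) == 1 and Esys_MPIInfo_mod(4, -1) == 3, since C integer
   division truncates towards zero and the k<0 branch compensates with an extra
   multiple of n. For n==1 the function always returns 0. */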

void Esys_MPIInfo_Split( Esys_MPIInfo *mpi_info, dim_t N, dim_t* local_N, index_t* offset)
{
    int rest=0;
    int s=mpi_info->size;
    int r=mpi_info->rank;
    *local_N=N/s;
    rest=N-(*local_N)*s;
    if (r<rest) {
        (*local_N)++;
        (*offset)=(*local_N)*r;
    } else {
        (*offset)=(*local_N)*r+rest;
    }
}
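
/* Worked example (illustrative, not part of the original file): splitting N=10
   items over size=3 ranks gives rest=1, so rank 0 receives local_N=4 at offset 0,
   rank 1 receives local_N=3 at offset 4, and rank 2 receives local_N=3 at offset 7. */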


dim_t Esys_MPIInfo_setDistribution(Esys_MPIInfo* mpi_info, index_t min_id, index_t max_id, index_t* distribution) {
    int rest=0, p;
    int s=mpi_info->size;
    dim_t N=max_id-min_id+1;
    if (N>0) {
        int local_N=N/s;
        rest=N-local_N*s;
        for (p=0; p<s; ++p) {
            if (p<rest) {
                distribution[p]=min_id+(local_N+1)*p;
            } else {
                distribution[p]=min_id+rest+local_N*p;
            }
        }
        distribution[s]=max_id+1;
        if (rest==0) {
            return local_N;
        } else {
            return local_N+1;
        }
    } else {
        for (p=0; p<s+1; ++p) distribution[p]=min_id;
        return 0;
    }
}
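
/* Worked example (illustrative, not part of the original file): with size=3,
   min_id=0 and max_id=9, the distribution array becomes {0, 4, 7, 10}, i.e.
   rank p owns ids distribution[p] .. distribution[p+1]-1, and the function
   returns 4, the largest chunk size. The distribution array is assumed to have
   room for size+1 entries. */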

/* checks that there is no error across all processes in a communicator */
/* NOTE: this does not guarantee that the error string is consistent on each process */
bool_t Esys_MPIInfo_noError( Esys_MPIInfo *mpi_info )
{
    int errorLocal = Esys_noError() ? 0 : 1;
    int errorGlobal = errorLocal;

    return (errorGlobal==0);
}
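
/* As written above, errorGlobal is simply a copy of the local flag. A minimal
   sketch of how the flags could be combined across all ranks, assuming an
   ESYS_MPI build (not part of the original file):

     int errorGlobal = errorLocal;
     #ifdef ESYS_MPI
     MPI_Allreduce(&errorLocal, &errorGlobal, 1, MPI_INT, MPI_MAX, mpi_info->comm);
     #endif
     return (errorGlobal==0);
*/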

/**************************************************
                    WRAPPERS
**************************************************/

int Esys_MPIInfo_initialized( void )
{
#ifdef ESYS_MPI
    int error=0, initialised=0;
    error = MPI_Initialized( &initialised );
    if( error!=MPI_SUCCESS )
        Esys_setError( ESYS_MPI_ERROR, "mpi_initialised : MPI error" );
    return initialised;
#else
    return TRUE;
#endif
}
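
/* Usage sketch (illustrative only, not part of the original file): callers can
   guard communicator setup on MPI being up; without ESYS_MPI the check is
   always true.

     if (Esys_MPIInfo_initialized()) {
         Esys_MPIInfo* info = Esys_MPIInfo_alloc(MPI_COMM_WORLD);
         // ... use info, then Esys_MPIInfo_free(info);
     }
*/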

/* Append MPI rank to file name if multiple MPI processes */
char *Esys_MPI_appendRankToFileName(const char *fileName, int mpi_size, int mpi_rank) {
    /* Make plenty of room for the mpi_rank number and terminating '\0' */
    char *newFileName = new char[strlen(fileName)+20];
    strncpy(newFileName, fileName, strlen(fileName)+1);
    if (mpi_size>1) sprintf(newFileName+strlen(newFileName), ".%04d", mpi_rank);
    return(newFileName);
}
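
/* Usage sketch (illustrative only, not part of the original file): on rank 2 of
   a 4-process run,

     char* name = Esys_MPI_appendRankToFileName("output.dat", 4, 2);
     // name is "output.dat.0002"; the caller owns the buffer and must delete[] it
     delete[] name;
*/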

#ifndef _OPENMP
int serial_get_max_threads(void) {
    return 1;
}
int serial_get_thread_num(void) {
    return 0;
}
#endif

