# Contents of /trunk/paso/src/PCG.c

Revision 3642 - (show annotations)
Thu Oct 27 03:41:51 2011 UTC (7 years, 6 months ago) by caltinay
File MIME type: text/plain
File size: 13666 byte(s)
Log message: Assorted spelling/comment fixes in paso.
/*
 * NOTE(review): the text below is an HTML-extracted listing of
 * paso/src/PCG.c (SVN r3642), NOT compilable source. The extraction
 * stripped every "<...>" span as if it were an HTML tag, so:
 *   - the two conditional #include lines lost their header names
 *     (presumably <omp.h> under _OPENMP and <mpi.h> under ESYS_MPI --
 *     TODO confirm against the repository);
 *   - comparison operators were eaten together with everything up to
 *     the next ">" character, e.g. "if (n_chunks*chunk_sizempi_info->comm);"
 *     is the fused remains of "if (n_chunks*chunk_size<n) ..." and a much
 *     later MPI_Allreduce(...) call -- original viewer lines ~111-218
 *     (memory allocation, solver setup, and the start of the iteration
 *     loop) are missing entirely, and similar gaps occur around viewer
 *     lines 232-287 and 307-332;
 *   - the viewer's own line numbers (1, 2, 3, ...) are fused into the text.
 * What survives shows a preconditioned conjugate-gradient driver:
 * OpenMP-parallel vector updates (optionally dynamically chunked via the
 * PASO_CHUNK_SIZE_PCG environment variable), MPI reductions of the dot
 * products under ESYS_MPI, a convergence test on sqrt(sum_5) against tol,
 * and exit statuses SOLVER_MAXITER_REACHED / SOLVER_BREAKDOWN /
 * SOLVER_NO_ERROR. Do not edit this text as-is; recover the intact file
 * from the escript/Paso SVN repository before making code changes.
 */
 1 2 /******************************************************* 3 * 4 * Copyright (c) 2003-2010 by University of Queensland 5 * Earth Systems Science Computational Center (ESSCC) 7 * 8 * Primary Business: Queensland, Australia 9 * Licensed under the Open Software License version 3.0 10 * http://www.opensource.org/licenses/osl-3.0.php 11 * 12 *******************************************************/ 13 14 15 /* PCG iterations */ 16 17 #include "SystemMatrix.h" 18 #include "Paso.h" 19 #include "Solver.h" 20 21 #ifdef _OPENMP 22 #include 23 #endif 24 25 #ifdef ESYS_MPI 26 #include 27 #endif 28 29 /* 30 * 31 * Purpose 32 * ======= 33 * 34 * PCG solves the linear system A*x = b using the 35 * preconditioned conjugate gradient method plus a smoother. 36 * A has to be symmetric. 37 * 38 * Convergence test: norm( b - A*x )< TOL. 39 * For other measures, see the above reference. 40 * 41 * Arguments 42 * ========= 43 * 44 * r (input) DOUBLE PRECISION array, dimension N. 45 * On entry, residual of initial guess x. 46 * 47 * x (input/output) DOUBLE PRECISION array, dimension N. 48 * On input, the initial guess. 49 * 50 * ITER (input/output) INT 51 * On input, the maximum iterations to be performed. 52 * On output, actual number of iterations performed. 53 * 54 * INFO (output) INT 55 * 56 * = SOLVER_NO_ERROR: Successful exit. Iterated approximate solution returned. 
57 * = SOLVER_MAXITER_REACHED 58 * = SOLVER_INPUT_ERROR Illegal parameter: 59 * = SOLVER_BREAKDOWN: If parameters RHO or OMEGA become smaller 60 * = SOLVER_MEMORY_ERROR : If parameters RHO or OMEGA become smaller 61 * 62 * ============================================================== 63 */ 64 65 /* #define PASO_DYNAMIC_SCHEDULING_MVM */ 66 67 #if defined PASO_DYNAMIC_SCHEDULING_MVM && defined __OPENMP 68 #define USE_DYNAMIC_SCHEDULING 69 #endif 70 71 err_t Paso_Solver_PCG( 72 Paso_SystemMatrix * A, 73 double * r, 74 double * x, 75 dim_t *iter, 76 double * tolerance, 77 Paso_Performance* pp) { 78 79 /* Local variables */ 80 dim_t num_iter=0,maxit,num_iter_global, len,rest, np, ipp; 81 #ifdef USE_DYNAMIC_SCHEDULING 82 dim_t chunk_size=-1; 83 #endif 84 register double ss,ss1; 85 dim_t i0, istart, iend; 86 bool_t breakFlag=FALSE, maxIterFlag=FALSE, convergeFlag=FALSE; 87 err_t status = SOLVER_NO_ERROR; 88 dim_t n = Paso_SystemMatrix_getTotalNumRows(A); 89 double *resid = tolerance, *rs=NULL, *p=NULL, *v=NULL, *x2=NULL ; 90 double tau_old,tau,beta,delta,gamma_1,gamma_2,alpha,sum_1,sum_2,sum_3,sum_4,sum_5,tol; 91 #ifdef ESYS_MPI 92 double loc_sum[2], sum[2]; 93 #endif 94 double norm_of_residual=0,norm_of_residual_global; 95 register double d; 96 97 /* There should not be any executable code before this ifdef */ 98 99 #ifdef USE_DYNAMIC_SCHEDULING 100 101 /* Watch out for these declarations (see above) */ 102 char* chksz_chr; 103 dim_t n_chunks; 104 105 chksz_chr=getenv("PASO_CHUNK_SIZE_PCG"); 106 if (chksz_chr!=NULL) sscanf(chksz_chr, "%d",&chunk_size); 107 np=omp_get_max_threads(); 108 chunk_size=MIN(MAX(1,chunk_size),n/np); 109 n_chunks=n/chunk_size; 110 if (n_chunks*chunk_sizempi_info->comm); 219 #endif 220 tau_old=tau; 221 tau=sum_1; 222 /* p=v+beta*p */ 223 #pragma omp parallel private(i0, istart, iend, ipp,beta) 224 { 225 #ifdef USE_DYNAMIC_SCHEDULING 226 #pragma omp for schedule(dynamic, 1) 227 for (ipp=0; ipp < n_chunks; ++ipp) { 228 istart=chunk_size*ipp; 229 
iend=MIN(istart+chunk_size,n); 230 #else 231 #pragma omp for schedule(static) 232 for (ipp=0; ipp mpi_info->comm); 288 #endif 289 delta=sum_2; 290 alpha=tau/delta; 291 292 if (! (breakFlag = (ABS(delta) <= TOLERANCE_FOR_SCALARS))) { 293 /* smoother */ 294 sum_3 = 0; 295 sum_4 = 0; 296 #pragma omp parallel private(i0, istart, iend, ipp,d, ss, ss1) 297 { 298 ss=0; 299 ss1=0; 300 #ifdef USE_DYNAMIC_SCHEDULING 301 #pragma omp for schedule(dynamic, 1) 302 for (ipp=0; ipp < n_chunks; ++ipp) { 303 istart=chunk_size*ipp; 304 iend=MIN(istart+chunk_size,n); 305 #else 306 #pragma omp for schedule(static) 307 for (ipp=0; ipp mpi_info->comm); 333 sum_3=sum[0]; 334 sum_4=sum[1]; 335 #endif 336 sum_5 = 0; 337 #pragma omp parallel private(i0, istart, iend, ipp, ss, gamma_1,gamma_2) 338 { 339 gamma_1= ( (ABS(sum_3)<= PASO_ZERO) ? 0 : -sum_4/sum_3) ; 340 gamma_2= PASO_ONE-gamma_1; 341 ss=0; 342 #ifdef USE_DYNAMIC_SCHEDULING 343 #pragma omp for schedule(dynamic, 1) 344 for (ipp=0; ipp < n_chunks; ++ipp) { 345 istart=chunk_size*ipp; 346 iend=MIN(istart+chunk_size,n); 347 #else 348 #pragma omp for schedule(static) 349 for (ipp=0; ipp mpi_info->comm); 373 #endif 374 norm_of_residual=sqrt(sum_5); 375 convergeFlag = norm_of_residual <= tol; 376 maxIterFlag = num_iter > maxit; 377 breakFlag = (ABS(tau) <= TOLERANCE_FOR_SCALARS); 378 } 379 } 380 /* end of iteration */ 381 num_iter_global=num_iter; 382 norm_of_residual_global=norm_of_residual; 383 if (maxIterFlag) { 384 status = SOLVER_MAXITER_REACHED; 385 } else if (breakFlag) { 386 status = SOLVER_BREAKDOWN; 387 } 388 Performance_stopMonitor(pp,PERFORMANCE_SOLVER); 389 TMPMEMFREE(rs); 390 TMPMEMFREE(x2); 391 TMPMEMFREE(v); 392 TMPMEMFREE(p); 393 *iter=num_iter_global; 394 *resid=norm_of_residual_global; 395 } 396 /* End of PCG */ 397 return status; 398 } 399

## Properties

| Name | Value |
|------|-------|
| svn:eol-style | native |
| svn:keywords | Author Date Id Revision |