/[escript]/trunk/escript/py_src/pdetools.py
Revision 2156 - Mon Dec 15 05:09:02 2008 UTC by gross
File MIME type: text/x-python
File size: 64857 byte(s)
Some modifications to the iterative solvers to make them easier to use.
There are also improved versions of the Darcy flux solver and the incompressible solver.


1 ksteube 1809
2     ########################################################
3 ksteube 1312 #
4 ksteube 1809 # Copyright (c) 2003-2008 by University of Queensland
5     # Earth Systems Science Computational Center (ESSCC)
6     # http://www.uq.edu.au/esscc
7 ksteube 1312 #
8 ksteube 1809 # Primary Business: Queensland, Australia
9     # Licensed under the Open Software License version 3.0
10     # http://www.opensource.org/licenses/osl-3.0.php
11 ksteube 1312 #
12 ksteube 1809 ########################################################
13 jgs 121
14 ksteube 1809 __copyright__="""Copyright (c) 2003-2008 by University of Queensland
15     Earth Systems Science Computational Center (ESSCC)
16     http://www.uq.edu.au/esscc
17     Primary Business: Queensland, Australia"""
18     __license__="""Licensed under the Open Software License version 3.0
19     http://www.opensource.org/licenses/osl-3.0.php"""
20     __url__="http://www.uq.edu.au/esscc/escript-finley"
21    
22 jgs 121 """
23 jgs 149 Provides some tools related to PDEs.
24 jgs 121
25 jgs 149 Currently includes:
26     - Projector - to project a discontinuous function onto a continuous function
27 gross 351 - Locator - to trace values in data objects at a certain location
28     - TimeIntegrationManager - to handle extrapolation in time
29 gross 867 - SaddlePointProblem - solver for saddle point problems using the inexact Uzawa scheme
30 gross 637
31     @var __author__: name of author
32     @var __copyright__: copyrights
33     @var __license__: licence agreement
34     @var __url__: url entry point on documentation
35     @var __version__: version
36     @var __date__: date of the version
37 jgs 121 """
38    
39 gross 637 __author__="Lutz Gross, l.gross@uq.edu.au"
40 elspeth 609
41 gross 637
42 jgs 149 import escript
43     import linearPDEs
44 jgs 121 import numarray
45 jgs 149 import util
46 ksteube 1312 import math
47 jgs 121
48 artak 1465 ##### Added by Artak
49 gross 1467 # from Numeric import zeros,Int,Float64
50 artak 1465 ###################################
51    
52    
53 gross 351 class TimeIntegrationManager:
54     """
55     a simple mechanism to manage time dependent values.
56    
57 gross 720 typical usage is::
58 gross 351
59 gross 720 dt=0.1 # time increment
60     tm=TimeIntegrationManager(inital_value,p=1)
61     while t<1.:
62     v_guess=tm.extrapolate(dt) # extrapolate to t+dt
63     v=...
64     tm.checkin(dt,v)
65     t+=dt
66 gross 351
67 gross 720 @note: currently only p=1 is supported.
68 gross 351 """
69     def __init__(self,*inital_values,**kwargs):
70     """
71     sets up the value manager where inital_values are the initial values and p is the order used for extrapolation
72     """
73     if kwargs.has_key("p"):
74     self.__p=kwargs["p"]
75     else:
76     self.__p=1
77     if kwargs.has_key("time"):
78     self.__t=kwargs["time"]
79     else:
80     self.__t=0.
81     self.__v_mem=[inital_values]
82     self.__order=0
83     self.__dt_mem=[]
84     self.__num_val=len(inital_values)
85    
86     def getTime(self):
87     return self.__t
88 gross 396 def getValue(self):
89 gross 409 out=self.__v_mem[0]
90     if len(out)==1:
91     return out[0]
92     else:
93     return out
94    
95 gross 351 def checkin(self,dt,*values):
96     """
97     adds new values to the manager. The (p+1)-th last value is dropped.
98     """
99     o=min(self.__order+1,self.__p)
100     self.__order=min(self.__order+1,self.__p)
101     v_mem_new=[values]
102     dt_mem_new=[dt]
103     for i in range(o-1):
104     v_mem_new.append(self.__v_mem[i])
105     dt_mem_new.append(self.__dt_mem[i])
106     v_mem_new.append(self.__v_mem[o-1])
107     self.__order=o
108     self.__v_mem=v_mem_new
109     self.__dt_mem=dt_mem_new
110     self.__t+=dt
111    
112     def extrapolate(self,dt):
113     """
114     extrapolates to dt forward in time.
115     """
116     if self.__order==0:
117     out=self.__v_mem[0]
118     else:
119     out=[]
120     for i in range(self.__num_val):
121     out.append((1.+dt/self.__dt_mem[0])*self.__v_mem[0][i]-dt/self.__dt_mem[0]*self.__v_mem[1][i])
122    
123     if len(out)==0:
124     return None
125     elif len(out)==1:
126     return out[0]
127     else:
128     return out
129 gross 396
130 gross 351
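#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): the hypothetical helper below tracks the scalar v(t)=t**2 with
# first order (p=1) extrapolation in time, following the pattern given in the
# TimeIntegrationManager docstring above.
def _example_time_integration():
    tm=TimeIntegrationManager(0.,p=1)   # initial value v(0)=0
    dt=0.1
    while tm.getTime()<1.:
        v_guess=tm.extrapolate(dt)      # linear guess for v(t+dt) once two values are stored
        v=(tm.getTime()+dt)**2          # the "solution" at the new time step
        tm.checkin(dt,v)                # store it and advance the time to t+dt
    return tm.getValue()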
131 jgs 121 class Projector:
132 jgs 149 """
133     The Projector is a factory which projects a discontinuous function onto a
134     continuous function on a given domain.
135     """
136 jgs 121 def __init__(self, domain, reduce = True, fast=True):
137     """
138 jgs 149 Create a continuous function space projector for a domain.
139 jgs 121
140 jgs 149 @param domain: Domain of the projection.
141     @param reduce: Flag to reduce projection order (default is True)
142     @param fast: Flag to use a fast method based on matrix lumping (default is True)
143 jgs 121 """
144 jgs 149 self.__pde = linearPDEs.LinearPDE(domain)
145 jgs 148 if fast:
146 jgs 149 self.__pde.setSolverMethod(linearPDEs.LinearPDE.LUMPING)
147 jgs 121 self.__pde.setSymmetryOn()
148     self.__pde.setReducedOrderTo(reduce)
149     self.__pde.setValue(D = 1.)
150 ksteube 1312 return
151 jgs 121
152     def __call__(self, input_data):
153     """
154 jgs 149 Projects input_data onto a continuous function
155 jgs 121
156 jgs 149 @param input_data: The input_data to be projected.
157 jgs 121 """
158 gross 525 out=escript.Data(0.,input_data.getShape(),self.__pde.getFunctionSpaceForSolution())
159 gross 1122 self.__pde.setValue(Y = escript.Data(), Y_reduced = escript.Data())
160 jgs 121 if input_data.getRank()==0:
161     self.__pde.setValue(Y = input_data)
162     out=self.__pde.getSolution()
163     elif input_data.getRank()==1:
164     for i0 in range(input_data.getShape()[0]):
165     self.__pde.setValue(Y = input_data[i0])
166     out[i0]=self.__pde.getSolution()
167     elif input_data.getRank()==2:
168     for i0 in range(input_data.getShape()[0]):
169     for i1 in range(input_data.getShape()[1]):
170     self.__pde.setValue(Y = input_data[i0,i1])
171     out[i0,i1]=self.__pde.getSolution()
172     elif input_data.getRank()==3:
173     for i0 in range(input_data.getShape()[0]):
174     for i1 in range(input_data.getShape()[1]):
175     for i2 in range(input_data.getShape()[2]):
176     self.__pde.setValue(Y = input_data[i0,i1,i2])
177     out[i0,i1,i2]=self.__pde.getSolution()
178     else:
179     for i0 in range(input_data.getShape()[0]):
180     for i1 in range(input_data.getShape()[1]):
181     for i2 in range(input_data.getShape()[2]):
182     for i3 in range(input_data.getShape()[3]):
183     self.__pde.setValue(Y = input_data[i0,i1,i2,i3])
184     out[i0,i1,i2,i3]=self.__pde.getSolution()
185     return out
186    
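#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): project the element-wise gradient of a continuous function back
# onto a continuous function space. It assumes that the esys.finley Rectangle
# domain is available; any other escript domain would do as well.
def _example_projector():
    from esys.finley import Rectangle
    domain=Rectangle(10,10)
    x=domain.getX()
    u=x[0]*x[1]                          # continuous scalar function
    g=util.grad(u)                       # its gradient lives on a discontinuous function space
    proj=Projector(domain,reduce=True,fast=True)
    return proj(g)                       # rank-1 Data on a continuous function space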
187 gross 525 class NoPDE:
188     """
189     solves the following problem for u:
190 jgs 121
191 gross 525 M{kronecker[i,j]*D[j]*u[j]=Y[i]}
192    
193     with constraint
194    
195     M{u[j]=r[j]} where M{q[j]>0}
196    
197     where D, Y, r and q are given functions of rank 1.
198    
199     In the case of scalars this takes the form
200    
201     M{D*u=Y}
202    
203     with constraint
204    
205     M{u=r} where M{q>0}
206    
207     where D, Y, r and q are given scalar functions.
208    
209     The constraint overwrites any other condition.
210    
211 gross 720 @note: This class is similar to the L{linearPDEs.LinearPDE} class with A=B=C=X=0 but has the intention
212     that all input parameter are given in L{Solution} or L{ReducedSolution}. The whole
213     thing is a bit strange and I blame Robert.Woodcock@csiro.au for this.
214 gross 525 """
215     def __init__(self,domain,D=None,Y=None,q=None,r=None):
216     """
217     initialize the problem
218    
219     @param domain: domain of the PDE.
220     @type domain: L{Domain}
221     @param D: coefficient of the solution.
222 gross 720 @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
223 gross 525 @param Y: right hand side
224 gross 720 @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
225 gross 525 @param q: location of constraints
226 gross 720 @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
227 gross 525 @param r: value of solution at locations of constraints
228 gross 720 @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
229 gross 525 """
230     self.__domain=domain
231     self.__D=D
232     self.__Y=Y
233     self.__q=q
234     self.__r=r
235     self.__u=None
236     self.__function_space=escript.Solution(self.__domain)
237     def setReducedOn(self):
238     """
239     sets the L{FunctionSpace} of the solution to L{ReducedSolution}
240     """
241     self.__function_space=escript.ReducedSolution(self.__domain)
242     self.__u=None
243    
244     def setReducedOff(self):
245     """
246     sets the L{FunctionSpace} of the solution to L{Solution}
247     """
248     self.__function_space=escript.Solution(self.__domain)
249     self.__u=None
250    
251     def setValue(self,D=None,Y=None,q=None,r=None):
252     """
253     assigns values to the parameters.
254    
255     @param D: coefficient of the solution.
256 gross 720 @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
257 gross 525 @param Y: right hand side
258 gross 720 @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
259 gross 525 @param q: location of constraints
260 gross 720 @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
261 gross 525 @param r: value of solution at locations of constraints
262 gross 720 @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
263 gross 525 """
264     if not D==None:
265     self.__D=D
266     self.__u=None
267     if not Y==None:
268     self.__Y=Y
269     self.__u=None
270     if not q==None:
271     self.__q=q
272     self.__u=None
273     if not r==None:
274     self.__r=r
275     self.__u=None
276    
277     def getSolution(self):
278     """
279     returns the solution
280    
281     @return: the solution of the problem
282     @rtype: L{Data} object in the L{FunctionSpace} L{Solution} or L{ReducedSolution}.
283     """
284     if self.__u==None:
285     if self.__D==None:
286     raise ValueError,"coefficient D is undefined"
287     D=escript.Data(self.__D,self.__function_space)
288     if D.getRank()>1:
289     raise ValueError,"coefficient D must have rank 0 or 1"
290     if self.__Y==None:
291     self.__u=escript.Data(0.,D.getShape(),self.__function_space)
292     else:
293     self.__u=util.quotient(self.__Y,D)
294     if not self.__q==None:
295     q=util.wherePositive(escript.Data(self.__q,self.__function_space))
296     self.__u*=(1.-q)
297     if not self.__r==None: self.__u+=q*self.__r
298     return self.__u
299    
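#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): solve D*u=Y with the constraint u=r where q>0 on a hypothetical
# esys.finley Rectangle domain.
def _example_nopde():
    from esys.finley import Rectangle
    domain=Rectangle(10,10)
    x=escript.Solution(domain).getX()
    problem=NoPDE(domain,D=2.,Y=1.,q=util.whereZero(x[0]),r=0.)
    return problem.getSolution()         # u=0.5 away from the face x[0]=0 and u=0 on it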
300 jgs 147 class Locator:
301     """
302 jgs 149 Locator provides access to the values of data objects at a given
303     spatial coordinate x.
304    
305     In fact, a Locator object finds the sample in the set of samples of a
306     given function space or domain which is closest to the given
307     point x.
308 jgs 147 """
309    
310     def __init__(self,where,x=numarray.zeros((3,))):
311 jgs 149 """
312     Initializes a Locator to access values in Data objects on the Domain
313     or FunctionSpace where for the sample point which is
314     closest to the given point x.
315 gross 880
316     @param where: function space
317     @type where: L{escript.FunctionSpace}
318     @param x: coordinates of the point (or a list of points) of interest.
319     @type x: L{numarray.NumArray} or C{list} of L{numarray.NumArray}
320 jgs 149 """
321     if isinstance(where,escript.FunctionSpace):
322 jgs 147 self.__function_space=where
323 jgs 121 else:
324 jgs 149 self.__function_space=escript.ContinuousFunction(where)
325 gross 880 if isinstance(x, list):
326     self.__id=[]
327     for p in x:
328 gross 921 self.__id.append(util.length(self.__function_space.getX()-p[:self.__function_space.getDim()]).minGlobalDataPoint())
329 gross 880 else:
330 gross 921 self.__id=util.length(self.__function_space.getX()-x[:self.__function_space.getDim()]).minGlobalDataPoint()
331 jgs 121
332 jgs 147 def __str__(self):
333 jgs 149 """
334     Returns the coordinates of the Locator as a string.
335     """
336 gross 880 x=self.getX()
337     if isinstance(x,list):
338     out="["
339     first=True
340     for xx in x:
341     if not first:
342     out+=","
343     else:
344     first=False
345     out+=str(xx)
346     out+="]"
347     else:
348     out=str(x)
349     return out
350 jgs 121
351 gross 880 def getX(self):
352     """
353     Returns the exact coordinates of the Locator.
354     """
355     return self(self.getFunctionSpace().getX())
356    
357 jgs 147 def getFunctionSpace(self):
358 jgs 149 """
359     Returns the function space of the Locator.
360     """
361 jgs 147 return self.__function_space
362    
363 gross 880 def getId(self,item=None):
364 jgs 149 """
365     Returns the identifier of the location.
366     """
367 gross 880 if item == None:
368     return self.__id
369     else:
370     if isinstance(self.__id,list):
371     return self.__id[item]
372     else:
373     return self.__id
374 jgs 121
375    
376 jgs 147 def __call__(self,data):
377 jgs 149 """
378     Returns the value of data at the Locator if data is a Data object, otherwise
379     the object is returned.
380     """
381 jgs 147 return self.getValue(data)
382 jgs 121
383 jgs 147 def getValue(self,data):
384 jgs 149 """
385     Returns the value of data at the Locator if data is a Data object
386     otherwise the object is returned.
387     """
388     if isinstance(data,escript.Data):
389 jgs 147 if data.getFunctionSpace()==self.getFunctionSpace():
390 gross 880 dat=data
391 jgs 147 else:
392 gross 880 dat=data.interpolate(self.getFunctionSpace())
393     id=self.getId()
394     r=data.getRank()
395     if isinstance(id,list):
396     out=[]
397     for i in id:
398 gross 921 o=dat.getValueOfGlobalDataPoint(*i)
399 gross 880 if data.getRank()==0:
400     out.append(o[0])
401     else:
402     out.append(o)
403     return out
404 jgs 147 else:
405 gross 921 out=dat.getValueOfGlobalDataPoint(*id)
406 gross 880 if data.getRank()==0:
407     return out[0]
408     else:
409     return out
410 jgs 147 else:
411     return data
412 jgs 149
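#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): trace the value of a Data object at the point (0.5,0.5) of a
# hypothetical esys.finley Rectangle domain.
def _example_locator():
    from esys.finley import Rectangle
    domain=Rectangle(10,10)
    loc=Locator(domain,numarray.array([0.5,0.5]))
    x=escript.ContinuousFunction(domain).getX()
    u=x[0]*x[1]
    return loc.getX(),loc(u)             # closest sample point and the value of u there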
413 ksteube 1312 class SolverSchemeException(Exception):
414     """
415     exceptions thrown by solvers
416     """
417     pass
418    
419     class IndefinitePreconditioner(SolverSchemeException):
420     """
421     the preconditioner is not positive definite.
422     """
423     pass
424     class MaxIterReached(SolverSchemeException):
425     """
426     maximum number of iteration steps is reached.
427     """
428     pass
429 gross 2100 class CorrectionFailed(SolverSchemeException):
430     """
431     no convergence has been achieved in the solution correction scheme.
432     """
433     pass
434 ksteube 1312 class IterationBreakDown(SolverSchemeException):
435     """
436     iteration scheme encounters an incurable breakdown.
437     """
438     pass
439     class NegativeNorm(SolverSchemeException):
440     """
441     a norm calculation returns a negative norm.
442     """
443     pass
444    
445 gross 2156 def PCG(r, Aprod, x, Msolve, bilinearform, atol=0, rtol=1.e-8, iter_max=100, initial_guess=True, verbose=False):
446 ksteube 1312 """
447     Solver for
448    
449     M{Ax=b}
450    
451     with a symmetric and positive definite operator A (more details required!).
452     It uses the conjugate gradient method with preconditioner M providing an approximation of A.
453    
454 gross 2156 The iteration is terminated if
455 ksteube 1312
456 gross 2156 M{|r| <= atol+rtol*|r0|}
457    
458     where M{r0} is the initial residual and M{|.|} is the energy norm. In fact
459    
460     M{|r| =sqrt( bilinearform(Msolve(r),r))}
461    
462 ksteube 1312 For details on the preconditioned conjugate gradient method see the book:
463    
464     Templates for the Solution of Linear Systems by R. Barrett, M. Berry,
465     T.F. Chan, J. Demmel, J. Donato, J. Dongarra, V. Eijkhout, R. Pozo,
466     C. Romine, and H. van der Vorst.
467    
468 gross 2156 @param r: initial residual M{r=b-Ax}. C{r} is altered.
469     @type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
470     @param x: an initial guess for the solution.
471     @type x: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
472 ksteube 1312 @param Aprod: returns the value Ax
473 jfenwick 2061 @type Aprod: function C{Aprod(x)} where C{x} is of the same type as argument C{x}.
474 gross 2156 The returned object needs to be of the same type as argument C{r}.
475 jfenwick 2061 @param Msolve: solves Mx=r
476 gross 2156 @type Msolve: function C{Msolve(r)} where C{r} is of the same type as argument C{r}.
477 jfenwick 2061 The returned object needs to be of the same type as argument C{x}.
478     @param bilinearform: inner product C{<x,r>}
479     @type bilinearform: function C{bilinearform(x,r)} where C{x} and C{r} are of the same types as the arguments C{x} and C{r}.
480     The returned value is a C{float}.
481 gross 2156 @param atol: absolute tolerance
482     @type atol: non-negative C{float}
483     @param rtol: relative tolerance
484     @type rtol: non-negative C{float}
485 ksteube 1312 @param iter_max: maximum number of iteration steps.
486     @type iter_max: C{int}
487 gross 1330 @return: the solution approximation and the corresponding residual
488     @rtype: C{tuple}
489 gross 2156 @warning: C{r} and C{x} are altered.
490 ksteube 1312 """
491     iter=0
492     rhat=Msolve(r)
493 gross 1330 d = rhat
494 ksteube 1312 rhat_dot_r = bilinearform(rhat, r)
495 gross 1330 if rhat_dot_r<0: raise NegativeNorm,"negative norm."
496 gross 2156 norm_r0=math.sqrt(rhat_dot_r)
497     atol2=atol+rtol*norm_r0
498     if atol2<=0:
499     raise ValueError,"Non-positive tolerance."
500     atol2=max(atol2, 100. * util.EPSILON * norm_r0)
501 ksteube 1312
502 gross 2156 if verbose: print "PCG: initial residual norm = %e (absolute tolerance = %e)"%(norm_r0, atol2)
503    
504    
505     while not math.sqrt(rhat_dot_r) <= atol2:
506 gross 1330 iter+=1
507 ksteube 1312 if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max
508    
509     q=Aprod(d)
510     alpha = rhat_dot_r / bilinearform(d, q)
511     x += alpha * d
512     r += (-alpha) * q
513    
514     rhat=Msolve(r)
515     rhat_dot_r_new = bilinearform(rhat, r)
516     beta = rhat_dot_r_new / rhat_dot_r
517     rhat+=beta * d
518     d=rhat
519    
520     rhat_dot_r = rhat_dot_r_new
521 gross 1330 if rhat_dot_r<0: raise NegativeNorm,"negative norm."
522 gross 2100 if verbose: print "PCG: iteration step %s: residual norm = %e"%(iter, math.sqrt(rhat_dot_r))
523     if verbose: print "PCG: tolerance reached after %s steps."%iter
524 gross 1330 return x,r
525 ksteube 1312
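#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): apply PCG to a tiny dense symmetric positive definite system Ax=b
# using numarray vectors, the Euclidean inner product and the identity as
# preconditioner. The lambdas mirror the Aprod, Msolve and bilinearform
# arguments described in the docstring above.
def _example_pcg():
    A=numarray.array([[4.,1.],[1.,3.]])
    b=numarray.array([1.,2.])
    x0=numarray.zeros(2,numarray.Float64)
    r0=b-numarray.dot(A,x0)                       # initial residual r=b-Ax
    x,r=PCG(r0,
            lambda v: numarray.dot(A,v),          # Aprod
            x0,
            lambda v: 1.*v,                       # Msolve: identity preconditioner (returns a copy)
            lambda u,v: float(numarray.dot(u,v)), # bilinearform
            rtol=1.e-8)
    return x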
526 gross 1878 class Defect(object):
527     """
528     defines a non-linear defect F(x) of a variable x
529     """
530     def __init__(self):
531     """
532     initialize defect
533     """
534     self.setDerivativeIncrementLength()
535 artak 1465
536 gross 1878 def bilinearform(self, x0, x1):
537     """
538     returns the inner product of x0 and x1
539     @param x0: a value for x
540     @param x1: a value for x
541     @return: the inner product of x0 and x1
542     @rtype: C{float}
543     """
544     return 0
545    
546     def norm(self,x):
547     """
548     the norm of argument C{x}
549    
550     @param x: a value for x
551     @return: norm of argument x
552     @rtype: C{float}
553     @note: by default C{sqrt(self.bilinearform(x,x))} is returned.
554     """
555     s=self.bilinearform(x,x)
556     if s<0: raise NegativeNorm,"negative norm."
557     return math.sqrt(s)
558 artak 1465
559 gross 1878
560     def eval(self,x):
561     """
562     returns the value F of a given x
563    
564     @param x: value for which the defect C{F} is evaluated.
565     @return: value of the defect at C{x}
566     """
567     return 0
568    
569     def __call__(self,x):
570     return self.eval(x)
571    
572     def setDerivativeIncrementLength(self,inc=math.sqrt(util.EPSILON)):
573     """
574     sets the relative length of the increment used to approximate the derivative of the defect.
575     The increment is inc*norm(x)/norm(v)*v in the direction of v with x as a starting point.
576    
577     @param inc: relative increment length
578     @type inc: positive C{float}
579     """
580     if inc<=0: raise ValueError,"positive increment required."
581     self.__inc=inc
582    
583     def getDerivativeIncrementLength(self):
584     """
585     returns the relative increment length used to approximate the derivative of the defect
586     @return: relative increment length
587     @rtype: positive C{float}
588     """
589     return self.__inc
590    
591     def derivative(self, F0, x0, v, v_is_normalised=True):
592     """
593     returns the directional derivative at x0 in the direction of v
594    
595     @param F0: value of this defect at x0
596     @param x0: value at which derivative is calculated.
597     @param v: direction
598     @param v_is_normalised: is true to indicate that C{v} is normalised (self.norm(v)=1)
599     @return: derivative of this defect at x0 in the direction of C{v}
600     @note: by default numerical evaluation (self.eval(x0+eps*v)-F0)/eps is used but this method
601     may be overwritten to use exact evaluation.
602     """
603     normx=self.norm(x0)
604     if normx>0:
605     epsnew = self.getDerivativeIncrementLength() * normx
606     else:
607     epsnew = self.getDerivativeIncrementLength()
608     if not v_is_normalised:
609     normv=self.norm(v)
610     if normv<=0:
611     return F0*0
612     else:
613     epsnew /= normv
614     F1=self.eval(x0 + epsnew * v)
615     return (F1-F0)/epsnew
616    
617     ######################################
618     def NewtonGMRES(defect, x, iter_max=100, sub_iter_max=20, atol=0,rtol=1.e-4, sub_tol_max=0.5, gamma=0.9, verbose=False):
619     """
620     solves a non-linear problem M{F(x)=0} for unknown M{x} using the stopping criterion:
621    
622     M{norm(F(x)) <= atol + rtol * norm(F(x0))}
623    
624     where M{x0} is the initial guess.
625    
626     @param defect: object defining the function M{F}, C{defect.norm} defines the M{norm} used in the stopping criterion.
627     @type defect: L{Defect}
628     @param x: initial guess for the solution, C{x} is altered.
629     @type x: any object type allowing basic operations such as L{numarray.NumArray}, L{Data}
630     @param iter_max: maximum number of iteration steps
631     @type iter_max: positive C{int}
632     @param sub_iter_max: maximum number of inner (GMRES) iteration steps before a restart
633     @type sub_iter_max: positive C{int}
634     @param atol: absolute tolerance for the solution
635     @type atol: positive C{float}
636     @param rtol: relative tolerance for the solution
637     @type rtol: positive C{float}
638     @param gamma: tolerance safety factor for inner iteration
639     @type gamma: positive C{float}, less than 1
640     @param sub_tol_max: upper bound for inner tolerance.
641     @type sub_tol_max: positive C{float}, less than 1
642     @return: an approximation of the solution with the desired accuracy
643     @rtype: same type as the initial guess.
644     """
645     lmaxit=iter_max
646     if atol<0: raise ValueError,"atol needs to be non-negative."
647     if rtol<0: raise ValueError,"rtol needs to be non-negative."
648     if rtol+atol<=0: raise ValueError,"rtol or atol needs to be positive."
649     if gamma<=0 or gamma>=1: raise ValueError,"tolerance safety factor for inner iteration (gamma =%s) needs to be positive and less than 1."%gamma
650     if sub_tol_max<=0 or sub_tol_max>=1: raise ValueError,"upper bound for inner tolerance for inner iteration (sub_tol_max =%s) needs to be positive and less than 1."%sub_tol_max
651    
652     F=defect(x)
653     fnrm=defect.norm(F)
654     stop_tol=atol + rtol*fnrm
655     sub_tol=sub_tol_max
656     if verbose: print "NewtonGMRES: initial residual = %e."%fnrm
657     if verbose: print " tolerance = %e."%sub_tol
658     iter=1
659     #
660     # main iteration loop
661     #
662     while not fnrm<=stop_tol:
663     if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max
664     #
665     # adjust sub_tol
666     #
667     if iter > 1:
668     rat=fnrm/fnrmo
669     sub_tol_old=sub_tol
670     sub_tol=gamma*rat**2
671     if gamma*sub_tol_old**2 > .1: sub_tol=max(sub_tol,gamma*sub_tol_old**2)
672     sub_tol=max(min(sub_tol,sub_tol_max), .5*stop_tol/fnrm)
673     #
674     # calculate newton increment xc
675     # if iter_max in __FDGMRES is reached MaxIterReached is thrown
676     # if iter_restart is reached, -1 is returned as sub_iter
677     # if atol is reached, sub_iter returns the number of steps performed to get there
678     #
679     #
680     if verbose: print " subiteration (GMRES) is called with relative tolerance %e."%sub_tol
681     try:
682     xc, sub_iter=__FDGMRES(F, defect, x, sub_tol*fnrm, iter_max=iter_max-iter, iter_restart=sub_iter_max)
683     except MaxIterReached:
684     raise MaxIterReached,"maximum number of %s steps reached."%iter_max
685     if sub_iter<0:
686     iter+=sub_iter_max
687     else:
688     iter+=sub_iter
689     # ====
690     x+=xc
691     F=defect(x)
692     iter+=1
693     fnrmo, fnrm=fnrm, defect.norm(F)
694     if verbose: print " step %s: residual %e."%(iter,fnrm)
695     if verbose: print "NewtonGMRES: completed after %s steps."%iter
696     return x
697    
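#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): solve the small nonlinear system
#     F(x) = ( x[0]**2+x[1]-3, x[0]+x[1]**2-5 ) = 0
# by subclassing Defect and calling NewtonGMRES. numarray arrays provide the
# in-place arithmetic the solver relies on; the exact solution is (1,2).
def _example_newton_gmres():
    class _MyDefect(Defect):
        def bilinearform(self,x0,x1):
            return float(numarray.dot(x0,x1))
        def eval(self,x):
            return numarray.array([x[0]**2+x[1]-3.,x[0]+x[1]**2-5.])
    x=numarray.array([1.,1.])
    return NewtonGMRES(_MyDefect(),x)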
698     def __givapp(c,s,vin):
699     """
700     applies a sequence of Givens rotations (c,s) recursively to the vector vin
701     @warning: C{vin} is altered.
702     """
703     vrot=vin
704 gross 1467 if isinstance(c,float):
705     vrot=[c*vrot[0]-s*vrot[1],s*vrot[0]+c*vrot[1]]
706     else:
707     for i in range(len(c)):
708     w1=c[i]*vrot[i]-s[i]*vrot[i+1]
709     w2=s[i]*vrot[i]+c[i]*vrot[i+1]
710     vrot[i:i+2]=w1,w2
711 artak 1465 return vrot
712    
713 gross 1878 def __FDGMRES(F0, defect, x0, atol, iter_max=100, iter_restart=20):
714 artak 1475 h=numarray.zeros((iter_restart,iter_restart),numarray.Float64)
715     c=numarray.zeros(iter_restart,numarray.Float64)
716     s=numarray.zeros(iter_restart,numarray.Float64)
717     g=numarray.zeros(iter_restart,numarray.Float64)
718 artak 1465 v=[]
719    
720 gross 1878 rho=defect.norm(F0)
721     if rho<=0.: return x0*0, 0
722 artak 1488
723 gross 1878 v.append(-F0/rho)
724 artak 1465 g[0]=rho
725 gross 1878 iter=0
726     while rho > atol and iter<iter_restart-1:
727 artak 1557
728 artak 1465 if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max
729    
730 gross 1878 p=defect.derivative(F0,x0,v[iter], v_is_normalised=True)
731 artak 1465 v.append(p)
732    
733 gross 1878 v_norm1=defect.norm(v[iter+1])
734 artak 1465
735 gross 1878 # Modified Gram-Schmidt
736 artak 1557 for j in range(iter+1):
737 gross 2100 h[j,iter]=defect.bilinearform(v[j],v[iter+1])
738     v[iter+1]-=h[j,iter]*v[j]
739 artak 1465
740 gross 2100 h[iter+1,iter]=defect.norm(v[iter+1])
741     v_norm2=h[iter+1,iter]
742 artak 1465
743 gross 1878 # Reorthogonalize if needed
744 artak 1465 if v_norm1 + 0.001*v_norm2 == v_norm1: #Brown/Hindmarsh condition (default)
745 gross 1878 for j in range(iter+1):
746     hr=defect.bilinearform(v[j],v[iter+1])
747 gross 2100 h[j,iter]=h[j,iter]+hr
748 gross 1878 v[iter+1] -= hr*v[j]
749 artak 1465
750 gross 1878 v_norm2=defect.norm(v[iter+1])
751 gross 2100 h[iter+1,iter]=v_norm2
752 gross 1878 # watch out for happy breakdown
753 artak 1550 if not v_norm2 == 0:
754 gross 2100 v[iter+1]=v[iter+1]/h[iter+1,iter]
755 artak 1465
756 gross 1878 # Form and store the information for the new Givens rotation
757 artak 1465 if iter > 0 :
758 artak 1550 hhat=numarray.zeros(iter+1,numarray.Float64)
759 gross 2100 for i in range(iter+1) : hhat[i]=h[i,iter]
760 gross 1878 hhat=__givapp(c[0:iter],s[0:iter],hhat);
761 gross 2100 for i in range(iter+1) : h[i,iter]=hhat[i]
762 artak 1465
763 gross 2100 mu=math.sqrt(h[iter,iter]*h[iter,iter]+h[iter+1,iter]*h[iter+1,iter])
764 artak 1557
765 artak 1465 if mu!=0 :
766 gross 2100 c[iter]=h[iter,iter]/mu
767     s[iter]=-h[iter+1,iter]/mu
768     h[iter,iter]=c[iter]*h[iter,iter]-s[iter]*h[iter+1,iter]
769     h[iter+1,iter]=0.0
770 gross 1878 g[iter:iter+2]=__givapp(c[iter],s[iter],g[iter:iter+2])
771 artak 1465
772 gross 1878 # Update the residual norm
773 artak 1465 rho=abs(g[iter+1])
774     iter+=1
775    
776 gross 1878 # At this point either iter > iter_max or rho < tol.
777     # It's time to compute x and leave.
778 artak 1465 if iter > 0 :
779 gross 1467 y=numarray.zeros(iter,numarray.Float64)
780 gross 2100 y[iter-1] = g[iter-1] / h[iter-1,iter-1]
781 artak 1465 if iter > 1 :
782     i=iter-2
783     while i>=0 :
784 gross 2100 y[i] = ( g[i] - numarray.dot(h[i,i+1:iter], y[i+1:iter])) / h[i,i]
785 artak 1465 i=i-1
786     xhat=v[iter-1]*y[iter-1]
787     for i in range(iter-1):
788     xhat += v[i]*y[i]
789 gross 1878 else :
790     xhat=v[0] * 0
791 artak 1557
792 artak 1488 if iter<iter_restart-1:
793 gross 1878 stopped=iter
794 artak 1475 else:
795 gross 1878 stopped=-1
796 artak 1465
797 gross 1878 return xhat,stopped
798 artak 1481
799 gross 2156 def GMRES(r, Aprod, x, bilinearform, atol=0, rtol=1.e-8, iter_max=100, iter_restart=20, verbose=False):
800     """
801     Solver for
802    
803     M{Ax=b}
804    
805     with a general operator A (more details required!).
806     It uses the generalized minimum residual method (GMRES).
807    
808     The iteration is terminated if
809    
810     M{|r| <= atol+rtol*|r0|}
811    
812     where M{r0} is the initial residual and M{|.|} is the energy norm. In fact
813    
814     M{|r| =sqrt( bilinearform(r,r))}
815    
816     @param r: initial residual M{r=b-Ax}. C{r} is altered.
817     @type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
818     @param x: an initial guess for the solution.
819     @type x: same like C{r}
820     @param Aprod: returns the value Ax
821     @type Aprod: function C{Aprod(x)} where C{x} is of the same type as argument C{x}.
822     The returned object needs to be of the same type as argument C{r}.
823     @param bilinearform: inner product C{<x,r>}
824     @type bilinearform: function C{bilinearform(x,r)} where C{x} and C{r} are of the same types as the arguments C{x} and C{r}.
825     The returned value is a C{float}.
826     @param atol: absolute tolerance
827     @type atol: non-negative C{float}
828     @param rtol: relative tolerance
829     @type rtol: non-negative C{float}
830     @param iter_max: maximum number of iteration steps.
831     @type iter_max: C{int}
832     @param iter_restart: in order to save memory the orthogonalization process is terminated after C{iter_restart} steps and the
833     iteration is restarted.
834     @type iter_restart: C{int}
835     @return: the solution approximation and the corresponding residual.
836     @rtype: C{tuple}
837     @warning: C{r} and C{x} are altered.
838     """
839 gross 1878 m=iter_restart
840 gross 2156 restarted=False
841 gross 1878 iter=0
842 gross 2100 if rtol>0:
843 gross 2156 r_dot_r = bilinearform(r, r)
844 gross 2100 if r_dot_r<0: raise NegativeNorm,"negative norm."
845     atol2=atol+rtol*math.sqrt(r_dot_r)
846     if verbose: print "GMRES: norm of right hand side = %e (absolute tolerance = %e)"%(math.sqrt(r_dot_r), atol2)
847     else:
848     atol2=atol
849     if verbose: print "GMRES: absolute tolerance = %e"%atol2
850 gross 2156 if atol2<=0:
851     raise ValueError,"Non-positive tolerance."
852 gross 2100
853 gross 1878 while True:
854     if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached"%iter_max
855 gross 2156 if restarted:
856     r2 = r-Aprod(x-x2)
857     else:
858     r2=1*r
859     x2=x*1.
860     x,stopped=__GMRESm(r2, Aprod, x, bilinearform, atol2, iter_max=iter_max-iter, iter_restart=m, verbose=verbose)
861 gross 2100 iter+=iter_restart
862 gross 1878 if stopped: break
863 gross 2100 if verbose: print "GMRES: restart."
864 gross 2156 restarted=True
865 gross 2100 if verbose: print "GMRES: tolerance has been reached."
866 gross 2156 return x
867 artak 1550
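#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): GMRES on a small non-symmetric system, with the same kind of
# callables as in the PCG sketch above. The exact solution is (1,2).
def _example_gmres():
    A=numarray.array([[3.,1.],[0.,2.]])
    b=numarray.array([5.,4.])
    x0=numarray.zeros(2,numarray.Float64)
    return GMRES(b-numarray.dot(A,x0),                 # initial residual (x0=0)
                 lambda v: numarray.dot(A,v),          # Aprod
                 x0,
                 lambda u,v: float(numarray.dot(u,v)), # bilinearform
                 rtol=1.e-8)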
868 gross 2156 def __GMRESm(r, Aprod, x, bilinearform, atol, iter_max=100, iter_restart=20, verbose=False):
869 gross 1878 iter=0
870 artak 1519
871 gross 2100 h=numarray.zeros((iter_restart+1,iter_restart),numarray.Float64)
872 artak 1519 c=numarray.zeros(iter_restart,numarray.Float64)
873     s=numarray.zeros(iter_restart,numarray.Float64)
874 gross 2100 g=numarray.zeros(iter_restart+1,numarray.Float64)
875 artak 1519 v=[]
876    
877 gross 2100 r_dot_r = bilinearform(r, r)
878     if r_dot_r<0: raise NegativeNorm,"negative norm."
879 artak 1519 rho=math.sqrt(r_dot_r)
880    
881     v.append(r/rho)
882     g[0]=rho
883    
884 gross 2100 if verbose: print "GMRES: initial residual %e (absolute tolerance = %e)"%(rho,atol)
885     while not (rho<=atol or iter==iter_restart):
886 artak 1519
887     if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max
888    
889 gross 2100 p=Aprod(v[iter])
890 artak 1519 v.append(p)
891    
892     v_norm1=math.sqrt(bilinearform(v[iter+1], v[iter+1]))
893    
894     # Modified Gram-Schmidt
895 gross 1878 for j in range(iter+1):
896 gross 2100 h[j,iter]=bilinearform(v[j],v[iter+1])
897     v[iter+1]-=h[j,iter]*v[j]
898 artak 1519
899 gross 2100 h[iter+1,iter]=math.sqrt(bilinearform(v[iter+1],v[iter+1]))
900     v_norm2=h[iter+1,iter]
901 artak 1519
902     # Reorthogonalize if needed
903     if v_norm1 + 0.001*v_norm2 == v_norm1: #Brown/Hindmarsh condition (default)
904 gross 1878 for j in range(iter+1):
905 artak 1519 hr=bilinearform(v[j],v[iter+1])
906 gross 2100 h[j,iter]=h[j,iter]+hr
907 gross 1878 v[iter+1] -= hr*v[j]
908 artak 1519
909     v_norm2=math.sqrt(bilinearform(v[iter+1], v[iter+1]))
910 gross 2100 h[iter+1,iter]=v_norm2
911 artak 1519
912     # watch out for happy breakdown
913 gross 1878 if not v_norm2 == 0:
914 gross 2100 v[iter+1]=v[iter+1]/h[iter+1,iter]
915 artak 1519
916     # Form and store the information for the new Givens rotation
917 gross 2100 if iter > 0: h[:iter+1,iter]=__givapp(c[:iter],s[:iter],h[:iter+1,iter])
918     mu=math.sqrt(h[iter,iter]*h[iter,iter]+h[iter+1,iter]*h[iter+1,iter])
919 artak 1519
920     if mu!=0 :
921 gross 2100 c[iter]=h[iter,iter]/mu
922     s[iter]=-h[iter+1,iter]/mu
923     h[iter,iter]=c[iter]*h[iter,iter]-s[iter]*h[iter+1,iter]
924     h[iter+1,iter]=0.0
925 gross 1878 g[iter:iter+2]=__givapp(c[iter],s[iter],g[iter:iter+2])
926 artak 1519 # Update the residual norm
927 gross 1878
928 artak 1519 rho=abs(g[iter+1])
929 gross 2100 if verbose: print "GMRES: iteration step %s: residual %e"%(iter,rho)
930 artak 1519 iter+=1
931    
932 gross 1878 # At this point either iter > iter_max or rho < tol.
933     # It's time to compute x and leave.
934    
935 gross 2100 if verbose: print "GMRES: iteration stopped after %s steps."%iter
936 artak 1519 if iter > 0 :
937     y=numarray.zeros(iter,numarray.Float64)
938 gross 2100 y[iter-1] = g[iter-1] / h[iter-1,iter-1]
939 artak 1519 if iter > 1 :
940     i=iter-2
941     while i>=0 :
942 gross 2100 y[i] = ( g[i] - numarray.dot(h[i,i+1:iter], y[i+1:iter])) / h[i,i]
943 artak 1519 i=i-1
944     xhat=v[iter-1]*y[iter-1]
945     for i in range(iter-1):
946     xhat += v[i]*y[i]
947 gross 2100 else:
948     xhat=v[0] * 0
949 gross 1878
950 artak 1519 x += xhat
951     if iter<iter_restart-1:
952     stopped=True
953     else:
954     stopped=False
955    
956     return x,stopped
957    
958 gross 2156 def MINRES(r, Aprod, x, Msolve, bilinearform, atol=0, rtol=1.e-8, iter_max=100):
959     """
960     Solver for
961    
962     M{Ax=b}
963    
964     with a symmetric and positive definite operator A (more details required!).
965     It uses the minimum residual method (MINRES) with preconditioner M providing an approximation of A.
966    
967     The iteration is terminated if
968    
969     M{|r| <= atol+rtol*|r0|}
970    
971     where M{r0} is the initial residual and M{|.|} is the energy norm. In fact
972    
973     M{|r| =sqrt( bilinearform(Msolve(r),r))}
974    
975     For details on the preconditioned conjugate gradient method see the book:
976    
977     Templates for the Solution of Linear Systems by R. Barrett, M. Berry,
978     T.F. Chan, J. Demmel, J. Donato, J. Dongarra, V. Eijkhout, R. Pozo,
979     C. Romine, and H. van der Vorst.
980    
981     @param r: initial residual M{r=b-Ax}. C{r} is altered.
982     @type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
983     @param x: an initial guess for the solution.
984     @type x: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
985     @param Aprod: returns the value Ax
986     @type Aprod: function C{Aprod(x)} where C{x} is of the same type as argument C{x}.
987     The returned object needs to be of the same type as argument C{r}.
988     @param Msolve: solves Mx=r
989     @type Msolve: function C{Msolve(r)} where C{r} is of the same type as argument C{r}.
990     The returned object needs to be of the same type as argument C{x}.
991     @param bilinearform: inner product C{<x,r>}
992     @type bilinearform: function C{bilinearform(x,r)} where C{x} and C{r} are of the same types as the arguments C{x} and C{r}.
993     The returned value is a C{float}.
994     @param atol: absolute tolerance
995     @type atol: non-negative C{float}
996     @param rtol: relative tolerance
997     @type rtol: non-negative C{float}
998     @param iter_max: maximum number of iteration steps.
999     @type iter_max: C{int}
1000     @return: the solution approximation
1001     @rtype: same type as the initial guess
1002     @warning: C{r} and C{x} are altered.
1003     """
1004 artak 1481 #------------------------------------------------------------------
1005     # Set up y and v for the first Lanczos vector v1.
1006     # y = beta1 P' v1, where P = C**(-1).
1007     # v is really P' v1.
1008     #------------------------------------------------------------------
1009 gross 2156 r1 = r
1010     y = Msolve(r)
1011     beta1 = bilinearform(y,r)
1012 artak 1481
1013     if beta1< 0: raise NegativeNorm,"negative norm."
1014    
1015 gross 2156 # If r = 0 exactly, stop with x
1016     if beta1==0: return x
1017 artak 1481
1018 gross 2156 if beta1> 0: beta1 = math.sqrt(beta1)
1019 artak 1481
1020     #------------------------------------------------------------------
1021 artak 1484 # Initialize quantities.
1022 artak 1481 # ------------------------------------------------------------------
1023 artak 1482 iter = 0
1024     Anorm = 0
1025     ynorm = 0
1026 artak 1481 oldb = 0
1027     beta = beta1
1028     dbar = 0
1029     epsln = 0
1030     phibar = beta1
1031     rhs1 = beta1
1032     rhs2 = 0
1033     rnorm = phibar
1034     tnorm2 = 0
1035     ynorm2 = 0
1036     cs = -1
1037     sn = 0
1038 gross 2156 w = r*0.
1039     w2 = r*0.
1040 artak 1481 r2 = r1
1041     eps = 0.0001
1042    
1043     #---------------------------------------------------------------------
1044     # Main iteration loop.
1045     # --------------------------------------------------------------------
1046 gross 2100 while not rnorm<=atol+rtol*Anorm*ynorm: # checks ||r|| < (||A|| ||x||) * TOL
1047 artak 1481
1048     if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max
1049     iter = iter + 1
1050    
1051     #-----------------------------------------------------------------
1052     # Obtain quantities for the next Lanczos vector vk+1, k = 1, 2,...
1053     # The general iteration is similar to the case k = 1 with v0 = 0:
1054     #
1055     # p1 = Operator * v1 - beta1 * v0,
1056     # alpha1 = v1'p1,
1057     # q2 = p2 - alpha1 * v1,
1058     # beta2^2 = q2'q2,
1059     # v2 = (1/beta2) q2.
1060     #
1061     # Again, y = betak P vk, where P = C**(-1).
1062     #-----------------------------------------------------------------
1063     s = 1/beta # Normalize previous vector (in y).
1064     v = s*y # v = vk if P = I
1065    
1066     y = Aprod(v)
1067 artak 1465
1068 artak 1481 if iter >= 2:
1069     y = y - (beta/oldb)*r1
1070    
1071     alfa = bilinearform(v,y) # alphak
1072 artak 1550 y += (- alfa/beta)*r2
1073 artak 1481 r1 = r2
1074     r2 = y
1075     y = Msolve(r2)
1076     oldb = beta # oldb = betak
1077 artak 1550 beta = bilinearform(y,r2) # beta = betak+1^2
1078 artak 1481 if beta < 0: raise NegativeNorm,"negative norm."
1079    
1080     beta = math.sqrt( beta )
1081     tnorm2 = tnorm2 + alfa*alfa + oldb*oldb + beta*beta
1082    
1083     if iter==1: # Initialize a few things.
1084     gmax = abs( alfa ) # alpha1
1085     gmin = gmax # alpha1
1086    
1087     # Apply previous rotation Qk-1 to get
1088     # [deltak epslnk+1] = [cs sn][dbark 0 ]
1089     # [gbar k dbar k+1] [sn -cs][alfak betak+1].
1090    
1091     oldeps = epsln
1092     delta = cs * dbar + sn * alfa # delta1 = 0 deltak
1093     gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
1094     epsln = sn * beta # epsln2 = 0 epslnk+1
1095     dbar = - cs * beta # dbar 2 = beta2 dbar k+1
1096    
1097     # Compute the next plane rotation Qk
1098    
1099     gamma = math.sqrt(gbar*gbar+beta*beta) # gammak
1100     gamma = max(gamma,eps)
1101     cs = gbar / gamma # ck
1102     sn = beta / gamma # sk
1103     phi = cs * phibar # phik
1104     phibar = sn * phibar # phibark+1
1105    
1106     # Update x.
1107    
1108     denom = 1/gamma
1109     w1 = w2
1110     w2 = w
1111     w = (v - oldeps*w1 - delta*w2) * denom
1112 artak 1550 x += phi*w
1113 artak 1481
1114     # Go round again.
1115    
1116     gmax = max(gmax,gamma)
1117     gmin = min(gmin,gamma)
1118     z = rhs1 / gamma
1119     ynorm2 = z*z + ynorm2
1120     rhs1 = rhs2 - delta*z
1121     rhs2 = - epsln*z
1122    
1123     # Estimate various norms and test for convergence.
1124    
1125     Anorm = math.sqrt( tnorm2 )
1126     ynorm = math.sqrt( ynorm2 )
1127    
1128     rnorm = phibar
1129    
1130     return x
1131 artak 1489
1132 gross 2156 def TFQMR(r, Aprod, x, bilinearform, atol=0, rtol=1.e-8, iter_max=100):
1133     """
1134     Solver for
1135 artak 1489
1136 gross 2156 M{Ax=b}
1137 artak 1489
1138 gross 2156 with a general operator A (more details required!).
1139     It uses the transpose-free quasi-minimal residual method (TFQMR).
1140 artak 1489
1141 gross 2156 The iteration is terminated if
1142 artak 1489
1143 gross 2156 M{|r| <= atol+rtol*|r0|}
1144    
1145     where M{r0} is the initial residual and M{|.|} is the energy norm. In fact
1146    
1147     M{|r| =sqrt( bilinearform(r,r))}
1148    
1149     @param r: initial residual M{r=b-Ax}. C{r} is altered.
1150     @type r: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
1151     @param x: an initial guess for the solution.
1152     @type x: same like C{r}
1153     @param Aprod: returns the value Ax
1154     @type Aprod: function C{Aprod(x)} where C{x} is of the same type as argument C{x}.
1155     The returned object needs to be of the same type as argument C{r}.
1156     @param bilinearform: inner product C{<x,r>}
1157     @type bilinearform: function C{bilinearform(x,r)} where C{x} and C{r} are of the same types as the arguments C{x} and C{r}.
1158     The returned value is a C{float}.
1159     @param atol: absolute tolerance
1160     @type atol: non-negative C{float}
1161     @param rtol: relative tolerance
1162     @type rtol: non-negative C{float}
1163     @param iter_max: maximum number of iteration steps.
1164     @type iter_max: C{int}
1165     @rtype: same type as the initial guess
1166     @warning: C{r} and C{x} are altered.
1167     """
1168 artak 1489 u1=0
1169     u2=0
1170     y1=0
1171     y2=0
1172    
1173     w = r
1174     y1 = r
1175     iter = 0
1176     d = 0
1177 gross 2156 v = Aprod(y1)
1178 artak 1489 u1 = v
1179    
1180     theta = 0.0;
1181     eta = 0.0;
1182 gross 2156 rho=bilinearform(r,r)
1183     if rho < 0: raise NegativeNorm,"negative norm."
1184     tau = math.sqrt(rho)
1185     norm_r0=tau
1186     while tau>atol+rtol*norm_r0:
1187 artak 1489 if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max
1188    
1189     sigma = bilinearform(r,v)
1190 gross 2156 if sigma == 0.0: raise IterationBreakDown,'TFQMR breakdown, sigma=0'
1191 artak 1489 alpha = rho / sigma
1192    
1193     for j in range(2):
1194     #
1195     # Compute y2 and u2 only if you have to
1196     #
1197     if ( j == 1 ):
1198     y2 = y1 - alpha * v
1199 gross 2156 u2 = Aprod(y2)
1200 artak 1489
1201     m = 2 * (iter+1) - 2 + (j+1)
1202     if j==0:
1203     w = w - alpha * u1
1204     d = y1 + ( theta * theta * eta / alpha ) * d
1205     if j==1:
1206     w = w - alpha * u2
1207     d = y2 + ( theta * theta * eta / alpha ) * d
1208    
1209     theta = math.sqrt(bilinearform(w,w))/ tau
1210     c = 1.0 / math.sqrt ( 1.0 + theta * theta )
1211     tau = tau * theta * c
1212     eta = c * c * alpha
1213     x = x + eta * d
1214     #
1215     # Try to terminate the iteration at each pass through the loop
1216     #
1217 gross 2156 if rho == 0.0: raise IterationBreakDown,'TFQMR breakdown, rho=0'
1218 artak 1489
1219     rhon = bilinearform(r,w)
1220     beta = rhon / rho;
1221     rho = rhon;
1222     y1 = w + beta * y2;
1223 gross 2156 u1 = Aprod(y1)
1224 artak 1489 v = u1 + beta * ( u2 + beta * v )
1225    
1226 gross 2156 iter += 1
1227 artak 1489
1228     return x
1229    
1230    
1231 artak 1465 #############################################
1232    
1233 gross 1331 class ArithmeticTuple(object):
1234     """
1235     tuple supporting inplace update x+=y and scaling x=a*y where x,y are ArithmeticTuples and a is a float.
1236    
1237     example of usage:
1238    
1239     from esys.escript import Data
1240     from numarray import array
1241     a=Data(...)
1242     b=array([1.,4.])
1243     x=ArithmeticTuple(a,b)
1244     y=5.*x
1245    
1246     """
1247     def __init__(self,*args):
1248     """
1249     initialize object with elements args.
1250    
1251     @param args: tuple of objects that support inplace add (x+=y) and scaling (x=a*y)
1252     """
1253     self.__items=list(args)
1254    
1255     def __len__(self):
1256     """
1257     number of items
1258    
1259     @return: number of items
1260     @rtype: C{int}
1261     """
1262     return len(self.__items)
1263    
1264     def __getitem__(self,index):
1265     """
1266     get an item
1267    
1268     @param index: item to be returned
1269     @type index: C{int}
1270     @return: item with index C{index}
1271     """
1272     return self.__items.__getitem__(index)
1273    
1274     def __mul__(self,other):
1275     """
1276     scaling from the right
1277    
1278     @param other: scaling factor
1279     @type other: C{float}
1280     @return: itemwise self*other
1281     @rtype: L{ArithmeticTuple}
1282     """
1283     out=[]
1284 gross 1896 try:
1285     l=len(other)
1286     if l!=len(self):
1287     raise ValueError,"lengths of arguments don't match."
1288     for i in range(l): out.append(self[i]*other[i])
1289     except TypeError:
1290     for i in range(len(self)): out.append(self[i]*other)
1291 gross 1331 return ArithmeticTuple(*tuple(out))
1292    
1293     def __rmul__(self,other):
1294     """
1295     scaling from the left
1296    
1297     @param other: scaling factor
1298     @type other: C{float}
1299     @return: itemwise other*self
1300     @rtype: L{ArithmeticTuple}
1301     """
1302     out=[]
1303 gross 1896 try:
1304     l=len(other)
1305     if l!=len(self):
1306     raise ValueError,"lengths of arguments don't match."
1307     for i in range(l): out.append(other[i]*self[i])
1308     except TypeError:
1309     for i in range(len(self)): out.append(other*self[i])
1310 gross 1331 return ArithmeticTuple(*tuple(out))
1311    
1312 artak 1465 def __div__(self,other):
1313     """
1314     dividing from the right
1315    
1316     @param other: scaling factor
1317     @type other: C{float}
1318     @return: itemwise self/other
1319     @rtype: L{ArithmeticTuple}
1320     """
1321 gross 1896 return self*(1/other)
1322 artak 1465
1323     def __rdiv__(self,other):
1324     """
1325     dividing from the left
1326    
1327     @param other: scaling factor
1328     @type other: C{float}
1329     @return: itemwise other/self
1330     @rtype: L{ArithmeticTuple}
1331     """
1332     out=[]
1333 gross 1896 try:
1334     l=len(other)
1335     if l!=len(self):
1336     raise ValueError,"lengths of arguments don't match."
1337     for i in range(l): out.append(other[i]/self[i])
1338     except TypeError:
1339     for i in range(len(self)): out.append(other/self[i])
1340 artak 1465 return ArithmeticTuple(*tuple(out))
1341    
1342 gross 1331 def __iadd__(self,other):
1343     """
1344     in-place add of other to self
1345    
1346     @param other: increment
1347     @type other: C{ArithmeticTuple}
1348     """
1349     if len(self) != len(other):
1350     raise ValueError,"tuple length must match."
1351     for i in range(len(self)):
1352     self.__items[i]+=other[i]
1353     return self
1354    
1355 artak 1550 def __add__(self,other):
1356     """
1357     add other to self
1358    
1359     @param other: increment
1360     @type other: C{ArithmeticTuple}
1361     """
1362 gross 1896 out=[]
1363     try:
1364     l=len(other)
1365     if l!=len(self):
1366     raise ValueError,"lengths of arguments don't match."
1367     for i in range(l): out.append(self[i]+other[i])
1368     except TypeError:
1369     for i in range(len(self)): out.append(self[i]+other)
1370     return ArithmeticTuple(*tuple(out))
1371 artak 1550
1372     def __sub__(self,other):
1373     """
1374     subtract other from self
1375    
1376     @param other: increment
1377     @type other: C{ArithmeticTuple}
1378     """
1379 gross 1896 out=[]
1380     try:
1381     l=len(other)
1382     if l!=len(self):
1383     raise ValueError,"lengths of arguments don't match."
1384     for i in range(l): out.append(self[i]-other[i])
1385     except TypeError:
1386     for i in range(len(self)): out.append(self[i]-other)
1387     return ArithmeticTuple(*tuple(out))
1388 artak 1557
1389     def __isub__(self,other):
1390     """
1391     subtract other from self
1392 artak 1550
1393 artak 1557 @param other: increment
1394     @type other: C{ArithmeticTuple}
1395     """
1396     if len(self) != len(other):
1397     raise ValueError,"tuple length must match."
1398     for i in range(len(self)):
1399     self.__items[i]-=other[i]
1400     return self
1401    
1402 artak 1550 def __neg__(self):
1403     """
1404     negate
1405    
1406     """
1407 gross 1896 out=[]
1408 artak 1550 for i in range(len(self)):
1409 gross 1896 out.append(-self[i])
1410     return ArithmeticTuple(*tuple(out))
1411 artak 1550
1412    
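#######################################################################
# Illustrative usage sketch (added for clarity, not part of the original
# module): an ArithmeticTuple bundles several objects (here two numarray
# arrays of different lengths) so that the iterative solvers above can treat
# them as a single vector.
def _example_arithmetic_tuple():
    a=numarray.array([1.,2.])
    b=numarray.array([3.,4.,5.])
    x=ArithmeticTuple(a,b)
    y=2.*x                               # itemwise scaling
    y+=x                                 # in-place itemwise add
    return y[0],y[1]                     # ([3.,6.], [9.,12.,15.])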
1413 gross 1414 class HomogeneousSaddlePointProblem(object):
1414     """
1415 gross 1878 This provides a framework for solving linear homogeneous saddle point problems of the form
1416 gross 1414
1417     Av+B^*p=f
1418     Bv =0
1419    
1420     for the unknowns v and p and given operators A and B and given right hand side f.
1421 gross 2100 B^* is the adjoint operator of B.
1422 gross 1414 """
1423     def __init__(self,**kwargs):
1424     self.setTolerance()
1425 gross 2100 self.setAbsoluteTolerance()
1426 gross 2156 self.setSubProblemTolerance()
1427 gross 1414
1428 gross 2100 #=============================================================
1429 gross 1414 def initialize(self):
1430     """
1431     initialize the problem (overwrite)
1432     """
1433     pass
1434 gross 2100
1435 gross 1414 def B(self,v):
1436 gross 2100 """
1437     returns Bv (overwrite)
1438     @rtype: equal to the type of p
1439    
1440     @note: boundary conditions on p should be zero!
1441     """
1442     raise NotImplementedError,"no operator B implemented."
1443    
1444     def inner_pBv(self,p,Bv):
1445 gross 1414 """
1446 gross 2100 returns inner product of element p and Bv (overwrite)
1447    
1448     @type p: equal to the type of p
1449     @type Bv: equal to the type of result of operator B
1450     @rtype: C{float}
1451    
1453     """
1454 gross 2100 raise NotImplementedError,"no inner product for p implemented."
1455 gross 1414
1456 gross 2100 def inner_p(self,p0,p1):
1457 gross 1414 """
1458 gross 2100 returns inner product of elements p0 and p1 (overwrite)
1459 gross 1414
1460     @type p0: equal to the type of p
1461     @type p1: equal to the type of p
1462     @rtype: C{float}
1463     """
1464 gross 2100 raise NotImplementedError,"no inner product for p implemented."
1465    
1466     def inner_v(self,v0,v1):
1467     """
1468     returns inner product of two elements v0 and v1 (overwrite)
1469    
1470     @type v0: equal to the type of v
1471     @type v1: equal to the type of v
1472     @rtype: C{float}
1475     """
1476     raise NotImplementedError,"no inner product for v implemented."
1477 gross 1414 pass
1478    
1479     def solve_A(self,u,p):
1480     """
1481 gross 2100 solves Av=f-Au-B^*p with accuracy self.getSubProblemTolerance() (overwrite)
1482 gross 1414
1483     @rtype: equal to the type of v
1484     @note: boundary conditions on v should be zero!
1485     """
1486 gross 2100 raise NotImplementedError,"no operator A implemented."
1487 gross 1414
1488     def solve_prec(self,p):
1489     """
1490 gross 2100 provides a preconditioner for BA^{-1}B^* with accuracy self.getSubProblemTolerance() (overwrite)
1491 gross 1414
1492     @rtype: equal to the type of p
1493 gross 2100 @note: boundary conditions on p should be zero!
1494 gross 1414 """
1495 gross 2100 raise NotImplementedError,"no preconditioner for Schur complement implemented."
1496     #=============================================================
1497     def __Aprod_PCG(self,p):
1498     # return (v,Bv) with v=A^-1B*p
1499     #solve Av =B^*p as Av =f-Az-B^*(-p)
1500     v=self.solve_A(self.__z,-p)
1501     return ArithmeticTuple(v, self.B(v))
1502 gross 1414
1503 gross 2100 def __inner_PCG(self,p,r):
1504     return self.inner_pBv(p,r[1])
1505 gross 1414
1506 gross 2100 def __Msolve_PCG(self,r):
1507     return self.solve_prec(r[1])
1508     #=============================================================
1509     def __Aprod_GMRES(self,x):
1510     # return w,q from v, p
1511     # solve Aw =Av+B^*p as Aw =f-A(z-v)-B^*(-p)
1512     # and Sq=B(v-w)
1513     v=x[0]
1514     p=x[1]
1515     w=self.solve_A(self.__z-v,-p)
1516     Bw=self.B(w-v)
1517     q=self.solve_prec(Bw)
1518     return ArithmeticTuple(w,q)
1519 gross 1414
1520 gross 2100 def __inner_GMRES(self,a1,a2):
1521     return self.inner_p(a1[1],a2[1])+self.inner_v(a1[0],a2[0])
1522 artak 1465
1523 gross 2100 #=============================================================
1524     def norm(self,v,p):
1525     f=self.inner_p(p,p)+self.inner_v(v,v)
1526     if f<0:
1527     raise ValueError,"negative norm."
1528     return math.sqrt(f)
1529 artak 1550
1530 gross 2100 def solve(self,v,p,max_iter=20, verbose=False, show_details=False, useUzawa=True, iter_restart=20,max_correction_steps=3):
1531     """
1532     solves the saddle point problem using initial guesses v and p.
1533 gross 1414
1534 gross 2100 @param v: initial guess for velocity
1535     @param p: initial guess for pressure
1536     @type v: L{Data}
1537     @type p: L{Data}
1538     @param useUzawa: indicate the usage of the Uzawa scheme. Otherwise the problem is solved in coupled form.
1539     @param max_iter: maximum number of iteration steps per correction attempt.
1540     @param verbose: show information on the progress of the saddle point problem solver.
1541     @param show_details: show details of the sub-problem solves
1542     @param iter_restart: restart the iteration after C{iter_restart} steps (only used if useUzawa=False)
1543     @param max_correction_steps: maximum number of iteration steps in the attempt to get |Bv| to zero.
1544     @return: new approximations for velocity and pressure
1545     @type useUzawa: C{bool}
1546     @type max_iter: C{int}
1547     @type verbose: C{bool}
1548     @type show_details: C{bool}
1549     @type iter_restart: C{int}
1550     @type max_correction_steps: C{int}
1551     @rtype: C{tuple} of L{Data} objects
1552     """
1553     self.verbose=verbose
1554     self.show_details=show_details and self.verbose
1555     #=================================================================================
1556     # Az=f is solved as A(z-v)=f-Av-B^*0 (typically with z-v=0 on boundary)
1557     self.__z=v+self.solve_A(v,p*0)
1558     # tolerances:
1559     rtol=self.getTolerance()
1560     atol=self.getAbsoluteTolerance()
1561 gross 2156 if useUzawa:
1562     p0=self.solve_prec(self.B(self.__z))
1563     f0=self.norm(self.__z,p0)
1564     else:
1565     f0=util.sqrt(self.inner_v(self.__z,self.__z))
1566     if not f0>0: return self.__z, p*0
1567 gross 2100 ATOL=rtol*f0+atol
1568 gross 2156 if not ATOL>0: raise ValueError,"overall absolute tolerance needs to be positive."
1569 gross 2100 if self.verbose: print "saddle point solver: initial residual %e, tolerance = %e."%(f0,ATOL)
1570     # initialization
1571     self.iter=0
1572     correction_step=0
1573     converged=False
1574     # initial guess:
1575     q=p*1
1576 gross 2156 u=v*1
1577 gross 2100 while not converged :
1578     if useUzawa:
1579     # assume p is known: then v=z-A^-1B^*p
1580     #
1581     # which leads to BA^-1B^*p = Bz
1582     #
1583 gross 2156 # note that the residual r=Bz-BA^-1B^*p = B(z-A^-1B^*p) = Bv
1584     # we use the (v,Bv) to represent the residual
1585 gross 2100 #
1586 gross 2156 # the norm of the right hand side Bv = f0
1587 gross 2100 #
1588 gross 2156 # and the initial residual
1589     #
1590     # r=Bz-BA^{-1}B^*q = B(z-A^{-1}B^*q)=Bw with A(w-z)=f-Az-B^*q
1591     #
1592     w=self.solve_A(self.__z,q)+self.__z
1593 gross 2100 if self.verbose: print "enter PCG method (iter_max=%s, atol=%e, subtolerance=%e)"%(max_iter,ATOL, self.getSubProblemTolerance())
1594 gross 2156 q,r=PCG(ArithmeticTuple(w,self.B(w)),self.__Aprod_PCG,q,self.__Msolve_PCG,self.__inner_PCG,atol=ATOL, rtol=0.,iter_max=max_iter, verbose=self.verbose)
1595 gross 2100 u=r[0]
1596 gross 2156 Bu=r[1]
1597 gross 2100 else:
1598     #
1599     # with v=dv+z
1600     #
1601 gross 2156 # A v + B p = f
1602     # B v = 0
1603 gross 2100 #
1604 gross 2156 # apply the preconditioner [[A^{-1} 0][(S^{-1} B A^{-1}) -S^{-1}]]
1605 gross 2100 #
1606 gross 2156 w=self.solve_A(u,q)
1607 gross 2100 if self.verbose: print "enter GMRES (iter_max=%s, atol=%e, subtolerance=%e)"%(max_iter,ATOL,self.getSubProblemTolerance())
1608 gross 2156 x=GMRES(ArithmeticTuple(w,self.solve_prec(self.B(u+w))),self.__Aprod_GMRES, ArithmeticTuple(u,q), \
1609     self.__inner_GMRES,atol=ATOL, rtol=0.,iter_max=max_iter, iter_restart=iter_restart, verbose=self.verbose)
1610     u=x[0]
1611 gross 2100 q=x[1]
1612     Bu=self.B(u)
1613 gross 2156 # now we check if |Bu| ~ 0 or, more precisely, |Bu|_p <= atol + rtol*|u|_v
1614 gross 2100 nu=self.inner_v(u,u)
1615     p2=self.solve_prec(Bu)
1616     nBu=self.inner_p(p2,p2)
1617     if not nu>=0 or not nBu>=0: raise NegativeNorm,"negative norm."
1618     nu=math.sqrt(nu)
1619     nBu=math.sqrt(nBu)
1620     if self.verbose: print "saddle point solver: norm v= %e (Bv = %e)"%(nu,nBu)
1621     QTOL=atol+nu*rtol
1622     if nBu <= QTOL:
1623     converged=True
1624     else:
1625     ATOL=QTOL/nBu*ATOL*0.3
1626     if self.verbose: print "correction step %s: tolerance reduced to %e."%(correction_step,ATOL)
1627     converged=False
1628     correction_step+=1
1629     if correction_step>max_correction_steps:
1630     raise CorrectionFailed,"Given up after %d correction steps."%correction_step
1631     if self.verbose: print "saddle point solver: tolerance reached."
1632     return u,q
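    # Hedged usage sketch (as a comment): with sp an instance of a concrete subclass and
    # v0, p0 initial guesses of type L{Data}, a typical call of the solver is
    #
    #    sp.setTolerance(1.e-4)
    #    sp.setAbsoluteTolerance(0.)
    #    v,p=sp.solve(v0,p0,max_iter=50,verbose=True,useUzawa=True)
    #
    # sp, v0 and p0 are placeholders; the methods and keyword arguments are those defined
    # in this class.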
1633 artak 1465
1634 gross 2100 #==========================================================================================================================
1635     def setTolerance(self,tolerance=1.e-4):
1636     """
1637     sets the relative tolerance for (v,p)
1638    
1639     @param tolerance: tolerance to be used
1640     @type tolerance: non-negative C{float}
1641     """
1642     if tolerance<0:
1643     raise ValueError,"tolerance must be non-negative."
1644     self.__rtol=tolerance
1645 gross 2156 self.setSubProblemTolerance()
1646    
1647 gross 1414 def getTolerance(self):
1648 gross 2100 """
1649     returns the relative tolerance
1650 gross 1414
1651 gross 2100 @return: relative tolerance
1652     @rtype: C{float}
1653     """
1654     return self.__rtol
1655     def setAbsoluteTolerance(self,tolerance=0.):
1656     """
1657     sets the absolute tolerance
1658 gross 1414
1659 gross 2100 @param tolerance: tolerance to be used
1660     @type tolerance: non-negative C{float}
1661     """
1662     if tolerance<0:
1663     raise ValueError,"tolerance must be non-negative."
1664     self.__atol=tolerance
1665     def getAbsoluteTolerance(self):
1666     """
1667     returns the absolute tolerance
1668 gross 1414
1669 gross 2100 @return: absolute tolerance
1670     @rtype: C{float}
1671     """
1672     return self.__atol
1673 gross 1469
1674 gross 2156 def setSubProblemTolerance(self,rtol=None):
1675 gross 2100 """
1676     sets the relative tolerance to solve the subproblem(s).
1677 artak 1489
1678 gross 2100 @param rtol: relative tolerance
1679     @type rtol: positive C{float}
1680     """
1681 gross 2156 if rtol == None:
1682     rtol=max(200.*util.EPSILON,self.getTolerance()**2)
1683 gross 2100 if rtol<=0:
1684     raise ValueError,"tolerance must be positive."
1685 gross 2156 self.__sub_tol=rtol
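        # Illustration only: with getTolerance()==1.e-4 the default subproblem tolerance is
        # max(200.*util.EPSILON, (1.e-4)**2), i.e. about 1.e-8 in double precision.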
1686 gross 2100 def getSubProblemTolerance(self):
1687     """
1688     returns the relative tolerance used for solving the subproblem(s)
1689 artak 1557
1690 gross 2100 @return: relative tolerance for the subproblem(s)
1691     @rtype: C{float}
1692     """
1693     return self.__sub_tol
1694 artak 1550
1695 gross 1878 def MaskFromBoundaryTag(domain,*tags):
1696     """
1697 gross 2100 creates a mask on the Solution(domain) function space which is one for samples
1698 gross 1878 that touch the boundary tagged by tags.
1699    
1700     usage: m=MaskFromBoundaryTag(domain,"left", "right")
1701    
1702     @param domain: a given domain
1703     @type domain: L{escript.Domain}
1704     @param tags: boundary tags
1705     @type tags: C{str}
1706     @return: a mask which marks samples that are touching the boundary tagged by any of the given tags.
1707     @rtype: L{escript.Data} of rank 0
1708     """
1709     pde=linearPDEs.LinearPDE(domain,numEquations=1, numSolutions=1)
1710 gross 1956 d=escript.Scalar(0.,escript.FunctionOnBoundary(domain))
1711 gross 1878 for t in tags: d.setTaggedValue(t,1.)
1712     pde.setValue(y=d)
1713     return util.whereNonZero(pde.getRightHandSide())
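# Hedged usage sketch (as a comment): the returned mask is typically used to constrain a
# PDE solution on the tagged parts of the boundary, e.g.
#
#    m=MaskFromBoundaryTag(domain,"left","right")
#    pde.setValue(q=m)     # fix the solution where m is positive
#
# domain and pde stand for an existing L{escript.Domain} and L{linearPDEs.LinearPDE};
# the tag names "left" and "right" are just examples.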
1714     #============================================================================================================================================
1715 gross 867 class SaddlePointProblem(object):
1716     """
1717     This implements a solver for a saddlepoint problem
1718    
1719 gross 877 M{f(u,p)=0}
1720     M{g(u)=0}
1721 gross 867
1722 gross 867 for u and p. The problem is solved with an inexact Uzawa scheme for p:
1723    
1724 ksteube 990 M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
1725 gross 877 M{Q_g (p^{k+1}-p^{k}) = g(u^{k+1})}
1726 gross 867
1727     where Q_f is an approximation of the Jacobian A_f of f with respect to u and Q_g is an approximation of
1728     A_g A_f^{-1} A_g where A_g is the Jacobian of g with respect to p. As the construction of a 'proper'
1729     Q_g can be difficult, a non-linear conjugate gradient method is applied to solve for p, so Q_g in fact
1730     plays the role of a preconditioner.
1731     """
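    # Commented sketch of one step of the scheme described above (illustration only; the
    # actual implementation in solve() below adds backtracking on the relaxation factor):
    #
    #    du=self.solve_f(u,p)            # Q_f du = f(u,p)
    #    u=u-du                          # u^{k+1} = u^k - du
    #    dp=self.solve_g(u)              # Q_g dp = g(u^{k+1})
    #    p=p+self.relaxation*dp          # p^{k+1} = p^k + relaxation*dp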
1732     def __init__(self,verbose=False,*args):
1733     """
1734     initializes the problem
1735    
1736 ksteube 990 @param verbose: switches on the printing of some information
1737 gross 867 @type verbose: C{bool}
1738     @note: this method may be overwritten by a particular saddle point problem
1739     """
1740 gross 1659 print "SaddlePointProblem should not be used anymore!"
1741 gross 1107 if not isinstance(verbose,bool):
1742     raise TypeError("verbose needs to be of type bool.")
1743 gross 1106 self.__verbose=verbose
1744 gross 877 self.relaxation=1.
1745 gross 1878 DeprecationWarning("SaddlePointProblem should not be used anymore.")
1746 gross 867
1747     def trace(self,text):
1748     """
1749     prints text if verbose has been set
1750    
1751 ksteube 990 @param text: a text message
1752 gross 867 @type text: C{str}
1753     """
1754 ksteube 1567 if self.__verbose: print "%s: %s"%(str(self),text)
1755 gross 867
1756 gross 873 def solve_f(self,u,p,tol=1.e-8):
1757 gross 867 """
1758     solves
1759    
1760     A_f du = f(u,p)
1761    
1762     with tolerance C{tol} and returns du. A_f is the Jacobian of f with respect to u.
1763    
1764     @param u: current approximation of u
1765     @type u: L{escript.Data}
1766     @param p: current approximation of p
1767     @type p: L{escript.Data}
1768 gross 873 @param tol: tolerance expected for du
1769 gross 867 @type tol: C{float}
1770     @return: increment du
1771     @rtype: L{escript.Data}
1772     @note: this method has to be overwritten by a particular saddle point problem
1773     """
1774     pass
1775    
1776 gross 873 def solve_g(self,u,tol=1.e-8):
1777 gross 867 """
1778     solves
1779    
1780     Q_g dp = g(u)
1781    
1782     where Q_g is a preconditioner for A_g A_f^{-1} A_g and A_g is the Jacobian of g with respect to p.
1783    
1784     @param u: current approximation of u
1785     @type u: L{escript.Data}
1786 gross 873 @param tol: tolerance expected for dp
1787     @type tol: C{float}
1788 gross 867 @return: increment dp
1789     @rtype: L{escript.Data}
1790     @note: this method has to be overwritten by a particular saddle point problem
1791     """
1792     pass
1793    
1794     def inner(self,p0,p1):
1795     """
1796     inner product of p0 and p1 approximating p. Typically this returns integrate(p0*p1)
1797     @return: inner product of p0 and p1
1798     @rtype: C{float}
1799     """
1800     pass
1801    
1802 gross 877 subiter_max=3
1803     def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):
1804     """
1805     runs the solver
1806 gross 873
1807 gross 877 @param u0: initial guess for C{u}
1808     @type u0: L{esys.escript.Data}
1809     @param p0: initial guess for C{p}
1810     @type p0: L{esys.escript.Data}
1811     @param tolerance: tolerance for relative error in C{u} and C{p}
1812     @type tolerance: positive C{float}
1813     @param tolerance_u: tolerance for relative error in C{u} if different from C{tolerance}
1814     @type tolerance_u: positive C{float}
1815     @param iter_max: maximum number of iteration steps.
1816     @type iter_max: C{int}
1817     @param accepted_reduction: if the norm of g cannot be reduced by the factor C{accepted_reduction}, backtracking is used to adapt the
1818     relaxation factor. If C{accepted_reduction=None} no backtracking is used.
1819     @type accepted_reduction: positive C{float} or C{None}
1820     @param relaxation: initial relaxation factor. If C{relaxation==None}, the last relaxation factor is used.
1821     @type relaxation: C{float} or C{None}
1822     """
1823     tol=1.e-2
1824     if tolerance_u==None: tolerance_u=tolerance
1825     if not relaxation==None: self.relaxation=relaxation
1826     if accepted_reduction ==None:
1827     angle_limit=0.
1828     elif accepted_reduction>=1.:
1829     angle_limit=0.
1830     else:
1831     angle_limit=util.sqrt(1-accepted_reduction**2)
1832     self.iter=0
1833     u=u0
1834     p=p0
1835     #
1836     # initialize things:
1837     #
1838     converged=False
1839     #
1840     # start loop:
1841     #
1842     # initial search direction is g
1843     #
1844     while not converged :
1845     if self.iter>iter_max:
1846     raise ArithmeticError("no convergence after %s steps."%self.iter)
1847     f_new=self.solve_f(u,p,tol)
1848     norm_f_new = util.Lsup(f_new)
1849     u_new=u-f_new
1850     g_new=self.solve_g(u_new,tol)
1851     self.iter+=1
1852     norm_g_new = util.sqrt(self.inner(g_new,g_new))
1853     if norm_f_new==0. and norm_g_new==0.: return u, p
1854     if self.iter>1 and not accepted_reduction==None:
1855     #
1856     # did we manage to reduce the norm of g?
1857     # if not we start a backtracking procedure
1858     #
1859     # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g
1860     if norm_g_new > accepted_reduction * norm_g:
1861     sub_iter=0
1862     s=self.relaxation
1863     d=g
1864     g_last=g
1865     self.trace(" start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))
1866     while sub_iter < self.subiter_max and norm_g_new > accepted_reduction * norm_g:
1867     dg= g_new-g_last
1868     norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)
1869     rad=self.inner(g_new,dg)/self.relaxation
1870     # print " ",sub_iter,": rad, norm_dg:",abs(rad), norm_dg*norm_g_new * angle_limit
1871     # print " ",sub_iter,": rad, norm_dg:",rad, norm_dg, norm_g_new, norm_g
1872     if abs(rad) < norm_dg*norm_g_new * angle_limit:
1873     if sub_iter>0: self.trace(" no further improvements expected from backtracking.")
1874     break
1875     r=self.relaxation
1876     self.relaxation= - rad/norm_dg**2
1877     s+=self.relaxation
1878     #####
1879     # a=g_new+self.relaxation*dg/r
1880     # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation
1881     #####
1882     g_last=g_new
1883     p+=self.relaxation*d
1884     f_new=self.solve_f(u,p,tol)
1885     u_new=u-f_new
1886     g_new=self.solve_g(u_new,tol)
1887     self.iter+=1
1888     norm_f_new = util.Lsup(f_new)
1889     norm_g_new = util.sqrt(self.inner(g_new,g_new))
1890     # print " ",sub_iter," new g norm",norm_g_new
1891     self.trace(" %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))
1892     #
1893     # can we expect reduction of g?
1894     #
1895     # u_last=u_new
1896     sub_iter+=1
1897     self.relaxation=s
1898     #
1899     # check for convergence:
1900     #
1901     norm_u_new = util.Lsup(u_new)
1902     p_new=p+self.relaxation*g_new
1903     norm_p_new = util.sqrt(self.inner(p_new,p_new))
1904 artak 1550 self.trace("%s th step: f = %s, f/u = %s, g = %s, g/p = %s, relaxation = %s."%(self.iter, norm_f_new ,norm_f_new/norm_u_new, norm_g_new, norm_g_new/norm_p_new, self.relaxation))
1905 gross 873
1906 gross 877 if self.iter>1:
1907     dg2=g_new-g
1908     df2=f_new-f
1909     norm_dg2=util.sqrt(self.inner(dg2,dg2))
1910     norm_df2=util.Lsup(df2)
1911     # print norm_g_new, norm_g, norm_dg, norm_p, tolerance
1912     tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new
1913     tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new
1914     if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:
1915     converged=True
1916     f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new
1917     self.trace("convergence after %s steps."%self.iter)
1918     return u,p

Properties

Name Value
svn:eol-style native
svn:keywords Author Date Id Revision
