# Diff of /trunk/escript/py_src/pdetools.py

revision 637 by gross, Thu Mar 23 10:55:31 2006 UTC revision 877 by gross, Wed Oct 25 03:06:58 2006 UTC
# Line 7  Currently includes: Line 7  Currently includes:
7      - Projector - to project a discontinuous      - Projector - to project a discontinuous
8      - Locator - to trace values in data objects at a certain location      - Locator - to trace values in data objects at a certain location
9      - TimeIntegrationManager - to handle extrapolation in time      - TimeIntegrationManager - to handle extrapolation in time
10        - SaddlePointProblem - solver for saddle point problems using the inexact Uzawa scheme
11
12  @var __author__: name of author  @var __author__: name of author
# Line 36  class TimeIntegrationManager: Line 37  class TimeIntegrationManager:
37    """    """
38    a simple mechanism to manage time dependend values.    a simple mechanism to manage time dependend values.
39
40    typical usage is:    typical usage is::
41
42    dt=0.1 # time increment       dt=0.1 # time increment
43    tm=TimeIntegrationManager(inital_value,p=1)       tm=TimeIntegrationManager(inital_value,p=1)
44    while t<1.       while t<1.
45        v_guess=tm.extrapolate(dt) # extrapolate to t+dt           v_guess=tm.extrapolate(dt) # extrapolate to t+dt
46        v=...           v=...
47        tm.checkin(dt,v)           tm.checkin(dt,v)
48        t+=dt           t+=dt
49
50    @remark: currently only p=1 is supported.    @note: currently only p=1 is supported.
51    """    """
52    def __init__(self,*inital_values,**kwargs):    def __init__(self,*inital_values,**kwargs):
53       """       """
# Line 192  class NoPDE: Line 193  class NoPDE:
193
194       The constraint is overwriting any other condition.       The constraint is overwriting any other condition.
195
196       @remark: This class is similar to the L{LinearPDE} class with A=B=C=X=0 but has the intention       @note: This class is similar to the L{linearPDEs.LinearPDE} class with A=B=C=X=0 but has the intention
197                that all input parameter are given in L{Solution} or L{ReducedSolution}. The whole              that all input parameter are given in L{Solution} or L{ReducedSolution}. The whole
198                thing is a bit strange and I blame Robert.Woodcock@csiro.au for this.              thing is a bit strange and I blame Robert.Woodcock@csiro.au for this.
199       """       """
200       def __init__(self,domain,D=None,Y=None,q=None,r=None):       def __init__(self,domain,D=None,Y=None,q=None,r=None):
201           """           """
# Line 203  class NoPDE: Line 204  class NoPDE:
204           @param domain: domain of the PDE.           @param domain: domain of the PDE.
205           @type domain: L{Domain}           @type domain: L{Domain}
206           @param D: coefficient of the solution.           @param D: coefficient of the solution.
207           @type D: C{float}, C{int}, L{NumArray}, L{Data}           @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
208           @param Y: right hand side           @param Y: right hand side
209           @type Y: C{float}, C{int}, L{NumArray}, L{Data}           @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
210           @param q: location of constraints           @param q: location of constraints
211           @type q: C{float}, C{int}, L{NumArray}, L{Data}           @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
212           @param r: value of solution at locations of constraints           @param r: value of solution at locations of constraints
213           @type r: C{float}, C{int}, L{NumArray}, L{Data}           @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
214           """           """
215           self.__domain=domain           self.__domain=domain
216           self.__D=D           self.__D=D
# Line 237  class NoPDE: Line 238  class NoPDE:
238           assigns values to the parameters.           assigns values to the parameters.
239
240           @param D: coefficient of the solution.           @param D: coefficient of the solution.
241           @type D: C{float}, C{int}, L{NumArray}, L{Data}           @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
242           @param Y: right hand side           @param Y: right hand side
243           @type Y: C{float}, C{int}, L{NumArray}, L{Data}           @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
244           @param q: location of constraints           @param q: location of constraints
245           @type q: C{float}, C{int}, L{NumArray}, L{Data}           @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
246           @param r: value of solution at locations of constraints           @param r: value of solution at locations of constraints
247           @type r: C{float}, C{int}, L{NumArray}, L{Data}           @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
248           """           """
249           if not D==None:           if not D==None:
250              self.__D=D              self.__D=D
# Line 341  class Locator: Line 342  class Locator:
342      """      """
343          if isinstance(data,escript.Data):          if isinstance(data,escript.Data):
344             if data.getFunctionSpace()==self.getFunctionSpace():             if data.getFunctionSpace()==self.getFunctionSpace():
345               out=data.convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1])               #out=data.convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1])
346                 out=data.convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1],self.getId()[2])
347             else:             else:
348               out=data.interpolate(self.getFunctionSpace()).convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1])               #out=data.interpolate(self.getFunctionSpace()).convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1])
349                 out=data.interpolate(self.getFunctionSpace()).convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1],self.getId()[2])
350             if data.getRank()==0:             if data.getRank()==0:
351                return out[0]                return out[0]
352             else:             else:
# Line 351  class Locator: Line 354  class Locator:
354          else:          else:
355             return data             return data
356
358       """
359       This implements a solver for a saddlepoint problem
360
361       M{f(u,p)=0}
362       M{g(u)=0}
363
364       for u and p. The problem is solved with an inexact Uzawa scheme for p:
365
366       M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
367       M{Q_g (p^{k+1}-p^{k}) =   g(u^{k+1})}
368
369       where Q_f is an approximation of the Jacobian A_f of f with respect to u, and Q_g is an approximation of
370       A_g A_f^{-1} A_g, where A_g is the Jacobian of g with respect to p. As the construction of a 'proper'
371       Q_g can be difficult, a non-linear conjugate gradient method is applied to solve for p, so Q_g plays
372       in fact the role of a preconditioner.
373       """
def __init__(self,verbose=False,*args):
    """
    initializes the problem

    @param verbose: switches on printing out some information
    @type verbose: C{bool}
    @note: this method may be overwritten by a particular saddle point problem
    """
    # *args is accepted but unused here, so subclasses can pass extra
    # positional arguments without breaking this base initializer.
    self.__verbose=verbose
    # default relaxation factor; solve() may adjust it between calls.
    self.relaxation=1.
384
def trace(self,text):
    """
    prints text if verbose has been set

    @param text: a text message
    @type text: C{str}
    """
    # Parenthesized form: under Python 2 a single parenthesized argument
    # prints identically to the bare print statement, and it is also
    # valid Python 3 syntax.
    if self.__verbose: print("%s: %s"%(str(self),text))
393
def solve_f(self,u,p,tol=1.e-8):
    """
    solves

    A_f du = f(u,p)

    with tolerance C{tol} and returns the increment du, where A_f is the
    Jacobian of f with respect to u.

    @param u: current approximation of u
    @type u: L{escript.Data}
    @param p: current approximation of p
    @type p: L{escript.Data}
    @param tol: tolerance expected for du
    @type tol: C{float}
    @return: increment du
    @rtype: L{escript.Data}
    @note: this method has to be overwritten by a particular saddle point problem
    """
    # Abstract hook: the base class supplies no implementation.
    return None
413
def solve_g(self,u,tol=1.e-8):
    """
    solves

    Q_g dp = g(u)

    where Q_g is a preconditioner for A_g A_f^{-1} A_g, with A_g the
    Jacobian of g with respect to p.

    @param u: current approximation of u
    @type u: L{escript.Data}
    @param tol: tolerance expected for dp
    @type tol: C{float}
    @return: increment dp
    @rtype: L{escript.Data}
    @note: this method has to be overwritten by a particular saddle point problem
    """
    # Abstract hook: the base class supplies no implementation.
    return None
431
def inner(self,p0,p1):
    """
    inner product of p0 and p1 approximating p. Typically this returns integrate(p0*p1)

    @return: inner product of p0 and p1
    @rtype: C{float}
    """
    # Abstract hook: no default implementation is provided here.
    return None
439
# NOTE(review): this section is a diff-page rendering; every line begins with
# the revision's own line number.  The diff omits revision lines 507-509 and
# 514 inside the backtracking loop below, so `rad` is referenced without its
# defining statement being visible, and the update of self.relaxation between
# revision lines 513 and 515 is missing.  Do not treat this block as complete,
# runnable code; consult revision 877 of pdetools.py for the full method body.
440       subiter_max=3
          # maximum number of backtracking sub-steps per outer iteration.
441       def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):
442            """
443            runs the solver
444
445            @param u0: initial guess for C{u}
446            @type u0: L{esys.escript.Data}
447            @param p0: initial guess for C{p}
448            @type p0: L{esys.escript.Data}
449            @param tolerance: tolerance for relative error in C{u} and C{p}
450            @type tolerance: positive C{float}
451            @param tolerance_u: tolerance for relative error in C{u} if different from C{tolerance}
452            @type tolerance_u: positive C{float}
453            @param iter_max: maximum number of iteration steps.
454            @type iter_max: C{int}
455            @param accepted_reduction: if the norm  g cannot be reduced by C{accepted_reduction} backtracking to adapt the
456                                       relaxation factor. If C{accepted_reduction=None} no backtracking is used.
457            @type accepted_reduction: positive C{float} or C{None}
458            @param relaxation: initial relaxation factor. If C{relaxation==None}, the last relaxation factor is used.
459            @type relaxation: C{float} or C{None}
460            """
               # fixed (loose) tolerance handed to the solve_f/solve_g sub-solves.
461            tol=1.e-2
462            if tolerance_u==None: tolerance_u=tolerance
463            if not relaxation==None: self.relaxation=relaxation
               # angle_limit controls when backtracking gives up (see the test
               # against rad below); it is 0 unless 0 < accepted_reduction < 1.
464            if accepted_reduction ==None:
465                  angle_limit=0.
466            elif accepted_reduction>=1.:
467                  angle_limit=0.
468            else:
469                  angle_limit=util.sqrt(1-accepted_reduction**2)
470            self.iter=0
471            u=u0
472            p=p0
473            #
474            #   initialize things:
475            #
476            converged=False
477            #
478            #  start loop:
479            #
480            #  initial search direction is g
481            #
482            while not converged :
                   # NOTE(review): the test is > (not >=), so up to iter_max+1
                   # passes can run before this raises — confirm intended.
483                if self.iter>iter_max:
484                    raise ArithmeticError("no convergence after %s steps."%self.iter)
485                f_new=self.solve_f(u,p,tol)
486                norm_f_new = util.Lsup(f_new)
487                u_new=u-f_new
488                g_new=self.solve_g(u_new,tol)
489                self.iter+=1
490                norm_g_new = util.sqrt(self.inner(g_new,g_new))
491                if norm_f_new==0. and norm_g_new==0.: return u, p
                   # g, norm_g, f, norm_f (used below) are bound only at the end
                   # of each pass (revision line 555); the self.iter>1 guard
                   # keeps this branch off the first pass.
492                if self.iter>1 and not accepted_reduction==None:
493                   #
494                   #   did we manage to reduce the norm of G? I
495                   #   if not we start a backtracking procedure
496                   #
497                   # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g
498                   if norm_g_new > accepted_reduction * norm_g:
499                      sub_iter=0
500                      s=self.relaxation
501                      d=g
502                      g_last=g
503                      self.trace("    start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))
504                      while sub_iter < self.subiter_max and  norm_g_new > accepted_reduction * norm_g:
505                         dg= g_new-g_last
506                         norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)
                           # NOTE(review): `rad` is computed on revision lines
                           # 507-509, which are omitted from this diff view.
510                         if abs(rad) < norm_dg*norm_g_new * angle_limit:
511                             if sub_iter>0: self.trace("    no further improvements expected from backtracking.")
512                             break
513                         r=self.relaxation
                           # NOTE(review): revision line 514 (which presumably
                           # updates self.relaxation using r and rad) is omitted
                           # here; `r` looks unused in the visible lines.
515                         s+=self.relaxation
516                         #####
517                         # a=g_new+self.relaxation*dg/r
518                         # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation
519                         #####
520                         g_last=g_new
521                         p+=self.relaxation*d
522                         f_new=self.solve_f(u,p,tol)
523                         u_new=u-f_new
524                         g_new=self.solve_g(u_new,tol)
525                         self.iter+=1
526                         norm_f_new = util.Lsup(f_new)
527                         norm_g_new = util.sqrt(self.inner(g_new,g_new))
528                         # print "   ",sub_iter," new g norm",norm_g_new
529                         self.trace("    %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))
530                         #
531                         #   can we expect reduction of g?
532                         #
533                         # u_last=u_new
534                         sub_iter+=1
535                      self.relaxation=s
536                #
537                #  check for convergence:
538                #
539                norm_u_new = util.Lsup(u_new)
540                p_new=p+self.relaxation*g_new
541                norm_p_new = util.sqrt(self.inner(p_new,p_new))
542                self.trace("%s th step: f/u = %s, g/p = %s, relaxation = %s."%(self.iter,norm_f_new/norm_u_new, norm_g_new/norm_p_new, self.relaxation))
543
               # convergence test scales the tolerances by the last observed
               # change in g and f (a relative-error estimate); guarded by
               # self.iter>1 because f, g, norm_f, norm_g need a previous pass.
544                if self.iter>1:
545                   dg2=g_new-g
546                   df2=f_new-f
547                   norm_dg2=util.sqrt(self.inner(dg2,dg2))
548                   norm_df2=util.Lsup(df2)
549                   # print norm_g_new, norm_g, norm_dg, norm_p, tolerance
550                   tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new
551                   tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new
552                   if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:
553                       converged=True
554                       break
               # roll the "new" values over into the previous-pass names.
555                f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new
556            self.trace("convergence after %s steps."%self.iter)
557            return u,p
558    #   def solve(self,u0,p0,tolerance=1.e-6,iter_max=10,self.relaxation=1.):
559    #      tol=1.e-2
560    #      iter=0
561    #      converged=False
562    #      u=u0*1.
563    #      p=p0*1.
564    #      while not converged and iter<iter_max:
565    #          du=self.solve_f(u,p,tol)
566    #          u-=du
567    #          norm_du=util.Lsup(du)
568    #          norm_u=util.Lsup(u)
569    #
570    #          dp=self.relaxation*self.solve_g(u,tol)
571    #          p+=dp
572    #          norm_dp=util.sqrt(self.inner(dp,dp))
573    #          norm_p=util.sqrt(self.inner(p,p))
574    #          print iter,"-th step rel. errror u,p= (%s,%s) (%s,%s)(%s,%s)"%(norm_du,norm_dp,norm_du/norm_u,norm_dp/norm_p,norm_u,norm_p)
575    #          iter+=1
576    #
577    #          converged = (norm_du <= tolerance*norm_u) and  (norm_dp <= tolerance*norm_p)
578    #      if converged:
579    #          print "convergence after %s steps."%iter
580    #      else:
581    #          raise ArithmeticError("no convergence after %s steps."%iter)
582    #
583    #      return u,p
584
585  # vim: expandtab shiftwidth=4:  # vim: expandtab shiftwidth=4:

Legend:
 Removed from v.637 changed lines Added in v.877