/[escript]/temp/escript/py_src/pdetools.py

Diff of /temp/escript/py_src/pdetools.py


revision 873 by gross, Mon Oct 16 04:07:33 2006 UTC → revision 877 by gross, Wed Oct 25 03:06:58 2006 UTC
# Line 358 (v.873) / Line 358 (v.877):  class SaddlePointProblem(object)
     """
     This implements a solver for a saddle point problem
 
-    f(u,p)=0
-    g(u)=0
+    M{f(u,p)=0}
+    M{g(u)=0}
 
     for u and p. The problem is solved with an inexact Uzawa scheme for p:
 
-    Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})
-    Q_g (p^{k+1}-p^{k}) =   g(u^{k+1})
+    M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
+    M{Q_g (p^{k+1}-p^{k}) =   g(u^{k+1})}
 
     where Q_f is an approximation of the Jacobian A_f of f with respect to u and Q_g is an approximation of
     A_g A_f^{-1} A_g where A_g is the Jacobian of g with respect to u. As the construction of a 'proper'
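A minimal, self-contained sketch of the inexact Uzawa iteration described in the docstring above, applied to the linear saddle point problem f(u,p)=Au+Bp-b, g(u)=B^T u-c with numpy; the names A, B, b, c, Qf and Qg are illustrative assumptions and are not part of the escript API:

import numpy as np

np.random.seed(0)
n, m = 8, 3
A = np.random.rand(n, n); A = A.dot(A.T) + n*np.eye(n)   # SPD stand-in for the Jacobian A_f
B = np.random.rand(n, m)
b = np.random.rand(n)
c = np.random.rand(m)

Qf = A                                # Q_f: here the exact Jacobian of f with respect to u
Qg = B.T.dot(np.linalg.solve(A, B))   # Q_g: Schur-complement-like operator B^T A^{-1} B

u = np.zeros(n)
p = np.zeros(m)
relaxation = 1.0
for k in range(100):
    f = A.dot(u) + B.dot(p) - b
    u = u - np.linalg.solve(Qf, f)                 # Q_f (u^{k+1}-u^{k}) = -f(u^{k},p^{k})
    g = B.T.dot(u) - c
    p = p + relaxation*np.linalg.solve(Qg, g)      # Q_g (p^{k+1}-p^{k}) = g(u^{k+1})
    if np.linalg.norm(f) < 1.e-10 and np.linalg.norm(g) < 1.e-10:
        break
print("converged after", k+1, "steps")

With Q_f taken as the exact Jacobian and Q_g as the Schur complement, the loop converges within a few steps; escript's SaddlePointProblem performs the corresponding solves through the user-supplied solve_f() and solve_g() methods instead.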
# Line 380 (v.873) / Line 380 (v.877):  class SaddlePointProblem(object)
         @note: this method may be overwritten by a particular saddle point problem
         """
         self.__verbose=verbose
+        self.relaxation=1.
 
     def trace(self,text):
         """
# Line 436 (v.873) / Line 437 (v.877):  class SaddlePointProblem(object)
         """
         pass
 
-    def solve(self,u0,p0,tolerance=1.e-6,iter_max=10,relaxation=1.):
-       tol=1.e-2
-       iter=0
-       converged=False
-       u=u0*1.
-       p=p0*1.
-       while not converged and iter<iter_max:
-           du=self.solve_f(u,p,tol)
-           u-=du
-           norm_du=util.Lsup(du)
-           norm_u=util.Lsup(u)
-
-           dp=relaxation*self.solve_g(u,tol)
-           p+=dp
-           norm_dp=util.sqrt(self.inner(dp,dp))
-           norm_p=util.sqrt(self.inner(p,p))
-           print iter,"-th step rel. errror u,p= (%s,%s)(%s,%s)"%(norm_du/norm_u,norm_dp/norm_p,norm_u,norm_p)
-           iter+=1
-
-           converged = (norm_du <= tolerance*norm_u) and (norm_dp <= tolerance*norm_p)
-       if converged:
-           print "convergence after %s steps."%iter
-       else:
-           raise ArithmeticError("no convergence after %s steps."%iter)
-
-       return u,p
+    subiter_max=3
+    def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):
+        """
+        runs the solver
+
+        @param u0: initial guess for C{u}
+        @type u0: L{esys.escript.Data}
+        @param p0: initial guess for C{p}
+        @type p0: L{esys.escript.Data}
+        @param tolerance: tolerance for the relative error in C{u} and C{p}
+        @type tolerance: positive C{float}
+        @param tolerance_u: tolerance for the relative error in C{u} if different from C{tolerance}
+        @type tolerance_u: positive C{float}
+        @param iter_max: maximum number of iteration steps
+        @type iter_max: C{int}
+        @param accepted_reduction: if the norm of g cannot be reduced by the factor C{accepted_reduction},
+                                   backtracking is used to adapt the relaxation factor.
+                                   If C{accepted_reduction=None} no backtracking is used.
+        @type accepted_reduction: positive C{float} or C{None}
+        @param relaxation: initial relaxation factor. If C{relaxation==None}, the last relaxation factor is used.
+        @type relaxation: C{float} or C{None}
+        """
+        tol=1.e-2
+        if tolerance_u==None: tolerance_u=tolerance
+        if not relaxation==None: self.relaxation=relaxation
+        if accepted_reduction==None:
+            angle_limit=0.
+        elif accepted_reduction>=1.:
+            angle_limit=0.
+        else:
+            angle_limit=util.sqrt(1-accepted_reduction**2)
+        self.iter=0
+        u=u0
+        p=p0
+        #
+        #   initialize things:
+        #
+        converged=False
+        #
+        #  start loop:
+        #
+        #  initial search direction is g
+        #
+        while not converged:
+            if self.iter>iter_max:
+                raise ArithmeticError("no convergence after %s steps."%self.iter)
+            f_new=self.solve_f(u,p,tol)
+            norm_f_new=util.Lsup(f_new)
+            u_new=u-f_new
+            g_new=self.solve_g(u_new,tol)
+            self.iter+=1
+            norm_g_new=util.sqrt(self.inner(g_new,g_new))
+            if norm_f_new==0. and norm_g_new==0.: return u, p
+            if self.iter>1 and not accepted_reduction==None:
+                #
+                #   did we manage to reduce the norm of g?
+                #   if not we start a backtracking procedure
+                #
+                # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g
+                if norm_g_new > accepted_reduction * norm_g:
+                    sub_iter=0
+                    s=self.relaxation
+                    d=g
+                    g_last=g
+                    self.trace("    start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))
+                    while sub_iter < self.subiter_max and norm_g_new > accepted_reduction * norm_g:
+                        dg=g_new-g_last
+                        norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)
+                        rad=self.inner(g_new,dg)/self.relaxation
+                        # print "   ",sub_iter,": rad, norm_dg:",abs(rad), norm_dg*norm_g_new * angle_limit
+                        # print "   ",sub_iter,": rad, norm_dg:",rad, norm_dg, norm_g_new, norm_g
+                        if abs(rad) < norm_dg*norm_g_new*angle_limit:
+                            if sub_iter>0: self.trace("    no further improvements expected from backtracking.")
+                            break
+                        r=self.relaxation
+                        self.relaxation=-rad/norm_dg**2
+                        s+=self.relaxation
+                        #####
+                        # a=g_new+self.relaxation*dg/r
+                        # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation
+                        #####
+                        g_last=g_new
+                        p+=self.relaxation*d
+                        f_new=self.solve_f(u,p,tol)
+                        u_new=u-f_new
+                        g_new=self.solve_g(u_new,tol)
+                        self.iter+=1
+                        norm_f_new=util.Lsup(f_new)
+                        norm_g_new=util.sqrt(self.inner(g_new,g_new))
+                        # print "   ",sub_iter," new g norm",norm_g_new
+                        self.trace("    %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))
+                        #
+                        #   can we expect reduction of g?
+                        #
+                        # u_last=u_new
+                        sub_iter+=1
+                    self.relaxation=s
+            #
+            #  check for convergence:
+            #
+            norm_u_new=util.Lsup(u_new)
+            p_new=p+self.relaxation*g_new
+            norm_p_new=util.sqrt(self.inner(p_new,p_new))
+            self.trace("%s th step: f/u = %s, g/p = %s, relaxation = %s."%(self.iter,norm_f_new/norm_u_new, norm_g_new/norm_p_new, self.relaxation))
+
+            if self.iter>1:
+                dg2=g_new-g
+                df2=f_new-f
+                norm_dg2=util.sqrt(self.inner(dg2,dg2))
+                norm_df2=util.Lsup(df2)
+                # print norm_g_new, norm_g, norm_dg, norm_p, tolerance
+                tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new
+                tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new
+                if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:
+                    converged=True
+                    break
+            f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new
+        self.trace("convergence after %s steps."%self.iter)
+        return u,p
+#   def solve(self,u0,p0,tolerance=1.e-6,iter_max=10,self.relaxation=1.):
+#      tol=1.e-2
+#      iter=0
+#      converged=False
+#      u=u0*1.
+#      p=p0*1.
+#      while not converged and iter<iter_max:
+#          du=self.solve_f(u,p,tol)
+#          u-=du
+#          norm_du=util.Lsup(du)
+#          norm_u=util.Lsup(u)
+#
+#          dp=self.relaxation*self.solve_g(u,tol)
+#          p+=dp
+#          norm_dp=util.sqrt(self.inner(dp,dp))
+#          norm_p=util.sqrt(self.inner(p,p))
+#          print iter,"-th step rel. errror u,p= (%s,%s) (%s,%s)(%s,%s)"%(norm_du,norm_dp,norm_du/norm_u,norm_dp/norm_p,norm_u,norm_p)
+#          iter+=1
+#
+#          converged = (norm_du <= tolerance*norm_u) and (norm_dp <= tolerance*norm_p)
+#      if converged:
+#          print "convergence after %s steps."%iter
+#      else:
+#          raise ArithmeticError("no convergence after %s steps."%iter)
+#
+#      return u,p
 
 # vim: expandtab shiftwidth=4:
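The backtracking loop added in v.877 chooses the relaxation factor as a one-dimensional least-squares step: given the current residual g_new and the change dg produced by the previous substep of size relaxation, the update self.relaxation=-rad/norm_dg**2 evaluates to s = -relaxation*<g_new,dg>/<dg,dg>, the s that minimises ||g_new + (s/relaxation)*dg||; the angle_limit test stops substepping when, judging by the angle between g_new and dg, even that optimal step could not reach the accepted reduction. A small numpy check of the formula (the vectors are arbitrary illustrations, not escript Data objects):

import numpy as np

g_new = np.array([0.8, -0.3, 0.1])       # current residual of g (illustrative values)
dg = np.array([-1.0, 0.4, -0.2])         # change of g over the last substep
relaxation = 0.5                         # step size that produced dg

rad = np.dot(g_new, dg)/relaxation
norm_dg = abs(np.sqrt(np.dot(dg, dg))/relaxation)
s = -rad/norm_dg**2                      # same expression as self.relaxation=-rad/norm_dg**2 in solve()
# ...which equals the least-squares minimiser of ||g_new + (s/relaxation)*dg||:
assert np.isclose(s, -relaxation*np.dot(g_new, dg)/np.dot(dg, dg))
print("optimal relaxation:", s)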

Legend: lines prefixed with '-' were removed from v.873; lines prefixed with '+' were added in v.877; unprefixed lines are unchanged context.
