
Diff of /trunk/escript/py_src/pdetools.py


revision 2455 by jfenwick, Wed Jun 3 03:29:07 2009 UTC  vs.  revision 2548 by jfenwick, Mon Jul 20 06:20:06 2009 UTC
# Line 1  Line 1 
1    
2  ########################################################  ########################################################
3  #  #
4  # Copyright (c) 2003-2008 by University of Queensland  # Copyright (c) 2003-2009 by University of Queensland
5  # Earth Systems Science Computational Center (ESSCC)  # Earth Systems Science Computational Center (ESSCC)
6  # http://www.uq.edu.au/esscc  # http://www.uq.edu.au/esscc
7  #  #
# Line 145  class Projector: Line 145  class Projector:
145      """      """
146      self.__pde = linearPDEs.LinearPDE(domain)      self.__pde = linearPDEs.LinearPDE(domain)
147      if fast:      if fast:
148          self.__pde.setSolverMethod(linearPDEs.LinearPDE.LUMPING)          self.__pde.getSolverOptions().setSolverMethod(linearPDEs.SolverOptions.LUMPING)
149      self.__pde.setSymmetryOn()      self.__pde.setSymmetryOn()
150      self.__pde.setReducedOrderTo(reduce)      self.__pde.setReducedOrderTo(reduce)
151      self.__pde.setValue(D = 1.)      self.__pde.setValue(D = 1.)
152      return      return
153      def getSolverOptions(self):
154        """
155        Returns the solver options of the PDE solver.
156        
157        @rtype: L{linearPDEs.SolverOptions}
158        """
159        return self.__pde.getSolverOptions()
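A minimal usage sketch of the Projector with the new SolverOptions-based lumping switch; the finley Rectangle domain and the import paths are assumptions for illustration, not part of this diff:

    from esys.escript import Function
    from esys.escript.pdetools import Projector
    from esys.finley import Rectangle

    dom = Rectangle(10, 10)
    proj = Projector(dom, reduce=True, fast=True)   # fast=True selects LUMPING via getSolverOptions()
    opts = proj.getSolverOptions()                  # SolverOptions of the underlying LinearPDE
    smoothed = proj(Function(dom).getX()[0])        # project a Function-space quantity onto nodes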
160    def __call__(self, input_data):    def __call__(self, input_data):
161      """      """
# Line 320  class Locator: Line 326  class Locator:
326    
327         @param where: function space         @param where: function space
328         @type where: L{escript.FunctionSpace}         @type where: L{escript.FunctionSpace}
329         @param x: coefficient of the solution         @param x: location(s) of the Locator
330         @type x: C{numpy.ndarray} or C{list} of C{numpy.ndarray}         @type x: C{numpy.ndarray} or C{list} of C{numpy.ndarray}
331         """         """
332         if isinstance(where,escript.FunctionSpace):         if isinstance(where,escript.FunctionSpace):
333            self.__function_space=where            self.__function_space=where
334         else:         else:
335            self.__function_space=escript.ContinuousFunction(where)            self.__function_space=escript.ContinuousFunction(where)
336           iterative=False
337         if isinstance(x, list):         if isinstance(x, list):
338               if len(x)==0:
339               raise ValueError,"At least one point must be given."
340               try:
341                 iter(x[0])
342                 iterative=True
343               except TypeError:
344                 iterative=False
345           if iterative:
346             self.__id=[]             self.__id=[]
347             for p in x:             for p in x:
348                self.__id.append(util.length(self.__function_space.getX()-p[:self.__function_space.getDim()]).minGlobalDataPoint())                self.__id.append(util.length(self.__function_space.getX()-p[:self.__function_space.getDim()]).minGlobalDataPoint())
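A hedged sketch of the two call forms the constructor now distinguishes, a single point versus a list of points; the finley domain is an assumption for illustration:

    import numpy
    from esys.escript.pdetools import Locator
    from esys.finley import Rectangle

    dom = Rectangle(10, 10)
    loc_one  = Locator(dom, numpy.array([0.5, 0.5]))                         # one point -> one data point id
    loc_many = Locator(dom, [numpy.array([0., 0.]), numpy.array([1., 1.])])  # list of points -> list of ids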
# Line 390  class Locator: Line 405  class Locator:
405          object otherwise the object is returned.          object otherwise the object is returned.
406          """          """
407          if isinstance(data,escript.Data):          if isinstance(data,escript.Data):
408             if data.getFunctionSpace()==self.getFunctionSpace():             dat=util.interpolate(data,self.getFunctionSpace())
              dat=data  
            else:  
              dat=data.interpolate(self.getFunctionSpace())  
409             id=self.getId()             id=self.getId()
410             r=data.getRank()             r=data.getRank()
411             if isinstance(id,list):             if isinstance(id,list):
412                 out=[]                 out=[]
413                 for i in id:                 for i in id:
414                    o=numpy.array(data.getTupleForGlobalDataPoint(*i))                    o=numpy.array(dat.getTupleForGlobalDataPoint(*i))
415                    if data.getRank()==0:                    if data.getRank()==0:
416                       out.append(o[0])                       out.append(o[0])
417                    else:                    else:
418                       out.append(o)                       out.append(o)
419                 return out                 return out
420             else:             else:
421               out=numpy.array(data.getTupleForGlobalDataPoint(*id))               out=numpy.array(dat.getTupleForGlobalDataPoint(*id))
422               if data.getRank()==0:               if data.getRank()==0:
423                  return out[0]                  return out[0]
424               else:               else:
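A short sketch of reading values back through a Locator after the interpolation change above; any escript Data object can be passed, here simply the node coordinates (domain setup assumed):

    import numpy
    from esys.escript.pdetools import Locator
    from esys.finley import Rectangle

    dom = Rectangle(10, 10)
    loc = Locator(dom, [numpy.array([0., 0.]), numpy.array([1., 1.])])
    x = loc.getFunctionSpace().getX()   # data on another function space is interpolated first
    values = loc.getValue(x)            # a list of numpy arrays, one entry per point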
# Line 1465  class HomogeneousSaddlePointProblem(obje Line 1477  class HomogeneousSaddlePointProblem(obje
1477        for the unknowns M{v} and M{p} and given operators M{A} and M{B} and        for the unknowns M{v} and M{p} and given operators M{A} and M{B} and
1478        given right hand side M{f}. M{B^*} is the adjoint operator of M{B}.        given right hand side M{f}. M{B^*} is the adjoint operator of M{B}.
1479        """        """
1480        def __init__(self,**kwargs):        def __init__(self, adaptSubTolerance=True, **kwargs):
1481        """
1482        Initializes the saddle point problem.
1483        
1484        @param adaptSubTolerance: If True, the tolerance for the subproblem is set automatically.
1485        @type adaptSubTolerance: C{bool}
1486        """
1487          self.setTolerance()          self.setTolerance()
1488          self.setAbsoluteTolerance()          self.setAbsoluteTolerance()
1489          self.setSubProblemTolerance()      self.__adaptSubTolerance=adaptSubTolerance
   
1490        #=============================================================        #=============================================================
1491        def initialize(self):        def initialize(self):
1492          """          """
# Line 1558  class HomogeneousSaddlePointProblem(obje Line 1575  class HomogeneousSaddlePointProblem(obje
1575           @note: boundary conditions on p should be zero!           @note: boundary conditions on p should be zero!
1576           """           """
1577           raise NotImplementedError,"no preconditioner for Schur complement implemented."           raise NotImplementedError,"no preconditioner for Schur complement implemented."
1578          def setSubProblemTolerance(self):
1579             """
1580         Updates the tolerance for subproblems.
1581         @note: this method is typically overwritten by a subclass.
1582             """
1583             pass
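A hedged skeleton of what a concrete subclass typically does with this hook; the class name and the two PDE attributes are illustrative assumptions, not escript API:

    from esys.escript import linearPDEs
    from esys.escript.pdetools import HomogeneousSaddlePointProblem

    class MySaddleSolver(HomogeneousSaddlePointProblem):
        def __init__(self, domain, **kwargs):
            HomogeneousSaddlePointProblem.__init__(self, **kwargs)
            self.pde_v = linearPDEs.LinearPDE(domain)     # sub-solver for A
            self.pde_prec = linearPDEs.LinearPDE(domain)  # Schur complement preconditioner
        def setSubProblemTolerance(self):
            # push the adapted tolerance into both sub-solvers
            tol = self.getSubProblemTolerance()
            self.pde_v.getSolverOptions().setTolerance(tol)
            self.pde_prec.getSolverOptions().setTolerance(tol)

With adaptSubTolerance=True (the default) solve() invokes this hook before iterating; a driver would then call something like v, p = sp.solve(v0, p0, max_iter=50, verbose=True, usePCG=True), matching the new signature without show_details.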
1584        #=============================================================        #=============================================================
1585        def __Aprod_PCG(self,p):        def __Aprod_PCG(self,p):
1586            dv=self.solve_AinvBt(p)            dv=self.solve_AinvBt(p)
# Line 1569  class HomogeneousSaddlePointProblem(obje Line 1592  class HomogeneousSaddlePointProblem(obje
1592        def __Msolve_PCG(self,r):        def __Msolve_PCG(self,r):
1593            return self.solve_prec(r[1])            return self.solve_prec(r[1])
1594        #=============================================================        #=============================================================
 # rename solve_prec and change argument v to Bv  
 # change the argument of inner_pBv to v->Bv  
 # add Bv  
 # inner p still needed?  
 # change norm_Bv argument to Bv  
1595        def __Aprod_GMRES(self,p):        def __Aprod_GMRES(self,p):
1596            return self.solve_prec(self.Bv(self.solve_AinvBt(p)))            return self.solve_prec(self.Bv(self.solve_AinvBt(p)))
1597        def __inner_GMRES(self,p0,p1):        def __inner_GMRES(self,p0,p1):
1598           return self.inner_p(p0,p1)           return self.inner_p(p0,p1)
1599    
1600        #=============================================================        #=============================================================
1601        def norm_p(self,p):        def norm_p(self,p):
1602            """            """
# Line 1590  class HomogeneousSaddlePointProblem(obje Line 1609  class HomogeneousSaddlePointProblem(obje
1609            f=self.inner_p(p,p)            f=self.inner_p(p,p)
1610            if f<0: raise ValueError,"negative pressure norm."            if f<0: raise ValueError,"negative pressure norm."
1611            return math.sqrt(f)            return math.sqrt(f)
1612                    def adaptSubTolerance(self):
1613          """
1614        def solve(self,v,p,max_iter=20, verbose=False, show_details=False, usePCG=True, iter_restart=20, max_correction_steps=10):        Returns True if tolerance adaptation for the subproblem is chosen.
1615          """
1616              return self.__adaptSubTolerance
1617          
1618          def solve(self,v,p,max_iter=20, verbose=False, usePCG=True, iter_restart=20, max_correction_steps=10):
1619           """           """
1620           Solves the saddle point problem using initial guesses v and p.           Solves the saddle point problem using initial guesses v and p.
1621    
# Line 1605  class HomogeneousSaddlePointProblem(obje Line 1628  class HomogeneousSaddlePointProblem(obje
1628                            attempt                            attempt
1629           @param verbose: if True, shows information on the progress of the           @param verbose: if True, shows information on the progress of the
1630                           saddlepoint problem solver.                           saddlepoint problem solver.
          @param show_details: if True, shows details of the sub problem solver  
1631           @param iter_restart: restart the iteration after C{iter_restart} steps           @param iter_restart: restart the iteration after C{iter_restart} steps
1632                                (only used if usePCG=False)                                (only used if usePCG=False)
1633           @type usePCG: C{bool}           @type usePCG: C{bool}
1634           @type max_iter: C{int}           @type max_iter: C{int}
1635           @type verbose: C{bool}           @type verbose: C{bool}
          @type show_details: C{bool}  
1636           @type iter_restart: C{int}           @type iter_restart: C{int}
1637           @rtype: C{tuple} of L{Data} objects           @rtype: C{tuple} of L{Data} objects
1638           """           """
1639           self.verbose=verbose           self.verbose=verbose
          self.show_details=show_details and self.verbose  
1640           rtol=self.getTolerance()           rtol=self.getTolerance()
1641           atol=self.getAbsoluteTolerance()           atol=self.getAbsoluteTolerance()
1642         if self.adaptSubTolerance(): self.setSubProblemTolerance()
1643           correction_step=0           correction_step=0
1644           converged=False           converged=False
1645           while not converged:           while not converged:
# Line 1663  class HomogeneousSaddlePointProblem(obje Line 1684  class HomogeneousSaddlePointProblem(obje
1684           if tolerance<0:           if tolerance<0:
1685               raise ValueError,"tolerance must be positive."               raise ValueError,"tolerance must be positive."
1686           self.__rtol=tolerance           self.__rtol=tolerance
          self.setSubProblemTolerance()  
1687    
1688        def getTolerance(self):        def getTolerance(self):
1689           """           """
# Line 1694  class HomogeneousSaddlePointProblem(obje Line 1714  class HomogeneousSaddlePointProblem(obje
1714           """           """
1715           return self.__atol           return self.__atol
1716    
1717        def setSubProblemTolerance(self,rtol=None):        def getSubProblemTolerance(self):
1718           """           """
1719           Sets the relative tolerance to solve the subproblem(s).           Returns the relative tolerance used to solve the subproblem(s).
1720    
1721           @param rtol: relative tolerence           @return: relative tolerance for the subproblem(s)
1722           @type rtol: positive C{float}           @rtype: positive C{float}
1723           """           """
1724           if rtol == None:           return max(200.*util.EPSILON,self.getTolerance()**2)
               rtol=max(200.*util.EPSILON,self.getTolerance()**2)  
          if rtol<=0:  
              raise ValueError,"tolerance must be positive."  
          self.__sub_tol=rtol  
   
       def getSubProblemTolerance(self):  
          """  
          Returns the subproblem reduction factor.  
   
          @return: subproblem reduction factor  
          @rtype: C{float}  
          """  
          return self.__sub_tol  
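The adapted subproblem tolerance is now simply the squared outer tolerance, floored at 200*EPSILON; a quick numeric check (numpy's machine epsilon stands in for util.EPSILON here):

    import numpy
    rtol = 1.e-3
    sub_rtol = max(200.*numpy.finfo(float).eps, rtol**2)
    # sub_rtol == 1.e-6, since rtol**2 is far above 200*eps (about 4.4e-14)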
1725    
1726  def MaskFromBoundaryTag(domain,*tags):  def MaskFromBoundaryTag(domain,*tags):
1727     """     """
# Line 1737  def MaskFromBoundaryTag(domain,*tags): Line 1744  def MaskFromBoundaryTag(domain,*tags):
1744     pde.setValue(y=d)     pde.setValue(y=d)
1745     return util.whereNonZero(pde.getRightHandSide())     return util.whereNonZero(pde.getRightHandSide())
1746    
1747  #==============================================================================  def MaskFromTag(domain,*tags):
 class SaddlePointProblem(object):  
1748     """     """
1749     This implements a solver for a saddle point problem     Creates a mask on the Solution(domain) function space where the value is
1750       one for samples that touch regions tagged by tags.
1751    
1752     M{f(u,p)=0}     Usage: m=MaskFromTag(domain, "ham")
1753     M{g(u)=0}  
1754       @param domain: domain to be used
1755       @type domain: L{escript.Domain}
1756       @param tags: tags of the regions to be marked
1757       @type tags: C{str}
1758       @return: a mask which marks samples that touch regions tagged
1759                by any of the given tags
1760       @rtype: L{escript.Data} of rank 0
1761       """
1762       pde=linearPDEs.LinearPDE(domain,numEquations=1, numSolutions=1)
1763       d=escript.Scalar(0.,escript.Function(domain))
1764       for t in tags: d.setTaggedValue(t,1.)
1765       pde.setValue(Y=d)
1766       return util.whereNonZero(pde.getRightHandSide())
1767    
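A usage sketch for the two mask helpers; the boundary tags exist on finley's Rectangle by default, while the region tag "source" is an assumption about how the domain was tagged:

    from esys.escript.pdetools import MaskFromBoundaryTag, MaskFromTag
    from esys.finley import Rectangle

    dom = Rectangle(10, 10)                          # faces tagged "left", "right", "top", "bottom"
    bc  = MaskFromBoundaryTag(dom, "left", "right")  # 1 where samples touch the tagged boundary
    src = MaskFromTag(dom, "source")                 # 1 where samples touch elements tagged "source" (assumed tag)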
   for u and p. The problem is solved with an inexact Uzawa scheme for p:  
   
    M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}  
    M{Q_g (p^{k+1}-p^{k}) =   g(u^{k+1})}  
   
   where Q_f is an approximation of the Jacobian A_f of f with respect to u and  
   Q_g is an approximation of A_g A_f^{-1} A_g, where A_g is the Jacobian of g  
   with respect to p. As the construction of a 'proper' Q_g can be difficult, a  
   non-linear conjugate gradient method is applied to solve for p, so Q_g in  
   fact plays the role of a preconditioner.  
    """  
    def __init__(self,verbose=False,*args):  
        """  
        Initializes the problem.  
   
        @param verbose: if True, some information is printed in the process  
        @type verbose: C{bool}  
        @note: this method may be overwritten by a particular saddle point  
               problem  
        """  
        print "SaddlePointProblem should not be used anymore!"  
        if not isinstance(verbose,bool):  
             raise TypeError("verbose needs to be of type bool.")  
        self.__verbose=verbose  
        self.relaxation=1.  
        DeprecationWarning("SaddlePointProblem should not be used anymore.")  
   
    def trace(self,text):  
        """  
        Prints C{text} if verbose has been set.  
   
        @param text: a text message  
        @type text: C{str}  
        """  
        if self.__verbose: print "%s: %s"%(str(self),text)  
   
    def solve_f(self,u,p,tol=1.e-8):  
        """  
        Solves  
   
        A_f du = f(u,p)  
   
        with tolerance C{tol} and returns du. A_f is Jacobian of f with respect  
        to u.  
   
        @param u: current approximation of u  
        @type u: L{escript.Data}  
        @param p: current approximation of p  
        @type p: L{escript.Data}  
        @param tol: tolerance expected for du  
        @type tol: C{float}  
        @return: increment du  
        @rtype: L{escript.Data}  
        @note: this method has to be overwritten by a particular saddle point  
               problem  
        """  
        pass  
   
    def solve_g(self,u,tol=1.e-8):  
        """  
        Solves  
   
        Q_g dp = g(u)  
   
        where Q_g is a preconditioner for A_g A_f^{-1} A_g with A_g is the  
        Jacobian of g with respect to p.  
   
        @param u: current approximation of u  
        @type u: L{escript.Data}  
        @param tol: tolerance expected for dp  
        @type tol: C{float}  
        @return: increment dp  
        @rtype: L{escript.Data}  
        @note: this method has to be overwritten by a particular saddle point  
               problem  
        """  
        pass  
   
    def inner(self,p0,p1):  
        """  
        Inner product of p0 and p1 approximating p. Typically this returns  
        C{integrate(p0*p1)}.  
        @return: inner product of p0 and p1  
        @rtype: C{float}  
        """  
        pass  
   
    subiter_max=3  
    def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):  
         """  
         Runs the solver.  
   
         @param u0: initial guess for C{u}  
         @type u0: L{esys.escript.Data}  
         @param p0: initial guess for C{p}  
         @type p0: L{esys.escript.Data}  
         @param tolerance: tolerance for relative error in C{u} and C{p}  
         @type tolerance: positive C{float}  
         @param tolerance_u: tolerance for relative error in C{u} if different  
                             from C{tolerance}  
         @type tolerance_u: positive C{float}  
         @param iter_max: maximum number of iteration steps  
         @type iter_max: C{int}  
         @param accepted_reduction: if the norm g cannot be reduced by  
                                    C{accepted_reduction} backtracking to adapt  
                                    the relaxation factor. If  
                                    C{accepted_reduction=None} no backtracking  
                                    is used.  
         @type accepted_reduction: positive C{float} or C{None}  
         @param relaxation: initial relaxation factor. If C{relaxation==None},  
                            the last relaxation factor is used.  
         @type relaxation: C{float} or C{None}  
         """  
         tol=1.e-2  
         if tolerance_u==None: tolerance_u=tolerance  
         if not relaxation==None: self.relaxation=relaxation  
         if accepted_reduction ==None:  
               angle_limit=0.  
         elif accepted_reduction>=1.:  
               angle_limit=0.  
         else:  
               angle_limit=util.sqrt(1-accepted_reduction**2)  
         self.iter=0  
         u=u0  
         p=p0  
         #  
         #   initialize things:  
         #  
         converged=False  
         #  
         #  start loop:  
         #  
         #  initial search direction is g  
         #  
         while not converged :  
             if self.iter>iter_max:  
                 raise ArithmeticError("no convergence after %s steps."%self.iter)  
             f_new=self.solve_f(u,p,tol)  
             norm_f_new = util.Lsup(f_new)  
             u_new=u-f_new  
             g_new=self.solve_g(u_new,tol)  
             self.iter+=1  
             norm_g_new = util.sqrt(self.inner(g_new,g_new))  
             if norm_f_new==0. and norm_g_new==0.: return u, p  
             if self.iter>1 and not accepted_reduction==None:  
                #  
                 #   did we manage to reduce the norm of g? If  
                 #   not, we start a backtracking procedure  
                #  
                # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g  
                if norm_g_new > accepted_reduction * norm_g:  
                   sub_iter=0  
                   s=self.relaxation  
                   d=g  
                   g_last=g  
                   self.trace("    start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))  
                   while sub_iter < self.subiter_max and  norm_g_new > accepted_reduction * norm_g:  
                      dg= g_new-g_last  
                      norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)  
                      rad=self.inner(g_new,dg)/self.relaxation  
                      # print "   ",sub_iter,": rad, norm_dg:",abs(rad), norm_dg*norm_g_new * angle_limit  
                      # print "   ",sub_iter,": rad, norm_dg:",rad, norm_dg, norm_g_new, norm_g  
                      if abs(rad) < norm_dg*norm_g_new * angle_limit:  
                          if sub_iter>0: self.trace("    no further improvements expected from backtracking.")  
                          break  
                      r=self.relaxation  
                      self.relaxation= - rad/norm_dg**2  
                      s+=self.relaxation  
                      #####  
                      # a=g_new+self.relaxation*dg/r  
                      # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation  
                      #####  
                      g_last=g_new  
                      p+=self.relaxation*d  
                      f_new=self.solve_f(u,p,tol)  
                      u_new=u-f_new  
                      g_new=self.solve_g(u_new,tol)  
                      self.iter+=1  
                      norm_f_new = util.Lsup(f_new)  
                      norm_g_new = util.sqrt(self.inner(g_new,g_new))  
                      # print "   ",sub_iter," new g norm",norm_g_new  
                      self.trace("    %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))  
                      #  
                      #   can we expect reduction of g?  
                      #  
                      # u_last=u_new  
                      sub_iter+=1  
                   self.relaxation=s  
             #  
             #  check for convergence:  
             #  
             norm_u_new = util.Lsup(u_new)  
             p_new=p+self.relaxation*g_new  
             norm_p_new = util.sqrt(self.inner(p_new,p_new))  
             self.trace("%s th step: f = %s, f/u = %s, g = %s, g/p = %s, relaxation = %s."%(self.iter, norm_f_new ,norm_f_new/norm_u_new, norm_g_new, norm_g_new/norm_p_new, self.relaxation))  
   
             if self.iter>1:  
                dg2=g_new-g  
                df2=f_new-f  
                norm_dg2=util.sqrt(self.inner(dg2,dg2))  
                norm_df2=util.Lsup(df2)  
                # print norm_g_new, norm_g, norm_dg, norm_p, tolerance  
                tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new  
                tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new  
                if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:  
                    converged=True  
             f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new  
         self.trace("convergence after %s steps."%self.iter)  
         return u,p  
1768    
