# Diff of /trunk/escript/py_src/pdetools.py

revision 2473 by jfenwick, Wed Jun 3 03:29:07 2009 UTC revision 2474 by gross, Tue Jun 16 06:32:15 2009 UTC
# Line 145  class Projector: Line 145  class Projector:
145      """      """
146      self.__pde = linearPDEs.LinearPDE(domain)      self.__pde = linearPDEs.LinearPDE(domain)
147      if fast:      if fast:
148          self.__pde.setSolverMethod(linearPDEs.LinearPDE.LUMPING)          self.__pde.getSolverOptions().setSolverMethod(linearPDEs.SolverOptions.LUMPING)
149      self.__pde.setSymmetryOn()      self.__pde.setSymmetryOn()
150      self.__pde.setReducedOrderTo(reduce)      self.__pde.setReducedOrderTo(reduce)
151      self.__pde.setValue(D = 1.)      self.__pde.setValue(D = 1.)
152      return      return
153      def getSolverOptions(self):
154        """
155        Returns the solver options of the PDE solver.
156
157        @rtype: L{linearPDEs.SolverOptions}
158        """
159
160    def __call__(self, input_data):    def __call__(self, input_data):
161      """      """
1471        for the unknowns M{v} and M{p} and given operators M{A} and M{B} and        for the unknowns M{v} and M{p} and given operators M{A} and M{B} and
1472        given right hand side M{f}. M{B^*} is the adjoint operator of M{B}.        given right hand side M{f}. M{B^*} is the adjoint operator of M{B}.
1473        """        """
1474        def __init__(self,**kwargs):        def __init__(self, adaptSubTolerance=True, **kwargs):
1475        """
1476        initializes the saddle point problem
1477
1478        @param adaptSubTolerance: If True the tolerance for subproblem is set automatically.
1480        """
1481          self.setTolerance()          self.setTolerance()
1482          self.setAbsoluteTolerance()          self.setAbsoluteTolerance()

1484        #=============================================================        #=============================================================
1485        def initialize(self):        def initialize(self):
1486          """          """
1569           @note: boundary conditions on p should be zero!           @note: boundary conditions on p should be zero!
1570           """           """
1571           raise NotImplementedError,"no preconditioner for Schur complement implemented."           raise NotImplementedError,"no preconditioner for Schur complement implemented."
1572          def setSubProblemTolerance(self):
1573             """
1574         Updates the tolerance for subproblems
1575         @note: typically this method is overwritten.
1576             """
1577             pass
1578        #=============================================================        #=============================================================
1579        def __Aprod_PCG(self,p):        def __Aprod_PCG(self,p):
1580            dv=self.solve_AinvBt(p)            dv=self.solve_AinvBt(p)
1586        def __Msolve_PCG(self,r):        def __Msolve_PCG(self,r):
1587            return self.solve_prec(r[1])            return self.solve_prec(r[1])
1588        #=============================================================        #=============================================================
# rename solve_prec and change argument v to Bv
# change the argument of inner_pBv to v->Bv
# inner p still needed?
# change norm_Bv argument to Bv
1589        def __Aprod_GMRES(self,p):        def __Aprod_GMRES(self,p):
1590            return self.solve_prec(self.Bv(self.solve_AinvBt(p)))            return self.solve_prec(self.Bv(self.solve_AinvBt(p)))
1591        def __inner_GMRES(self,p0,p1):        def __inner_GMRES(self,p0,p1):
1592           return self.inner_p(p0,p1)           return self.inner_p(p0,p1)
1593
1594        #=============================================================        #=============================================================
1595        def norm_p(self,p):        def norm_p(self,p):
1596            """            """
1603            f=self.inner_p(p,p)            f=self.inner_p(p,p)
1604            if f<0: raise ValueError,"negative pressure norm."            if f<0: raise ValueError,"negative pressure norm."
1605            return math.sqrt(f)            return math.sqrt(f)
1607          """
1608        def solve(self,v,p,max_iter=20, verbose=False, show_details=False, usePCG=True, iter_restart=20, max_correction_steps=10):        Returns True if tolerance adaption for subproblem is choosen.
1609          """
1611
1612          def solve(self,v,p,max_iter=20, verbose=False, usePCG=True, iter_restart=20, max_correction_steps=10):
1613           """           """
1614           Solves the saddle point problem using initial guesses v and p.           Solves the saddle point problem using initial guesses v and p.
1615
1622                            attempt                            attempt
1623           @param verbose: if True, shows information on the progress of the           @param verbose: if True, shows information on the progress of the
@param show_details: if True, shows details of the sub problem solver
1625           @param iter_restart: restart the iteration after C{iter_restart} steps           @param iter_restart: restart the iteration after C{iter_restart} steps
1626                                (only used if useUzaw=False)                                (only used if useUzaw=False)
1627           @type usePCG: C{bool}           @type usePCG: C{bool}
1628           @type max_iter: C{int}           @type max_iter: C{int}
1629           @type verbose: C{bool}           @type verbose: C{bool}
@type show_details: C{bool}
1630           @type iter_restart: C{int}           @type iter_restart: C{int}
1631           @rtype: C{tuple} of L{Data} objects           @rtype: C{tuple} of L{Data} objects
1632           """           """
1633           self.verbose=verbose           self.verbose=verbose
self.show_details=show_details and self.verbose
1634           rtol=self.getTolerance()           rtol=self.getTolerance()
1635           atol=self.getAbsoluteTolerance()           atol=self.getAbsoluteTolerance()
1637           correction_step=0           correction_step=0
1638           converged=False           converged=False
1639           while not converged:           while not converged:
1678           if tolerance<0:           if tolerance<0:
1679               raise ValueError,"tolerance must be positive."               raise ValueError,"tolerance must be positive."
1680           self.__rtol=tolerance           self.__rtol=tolerance
self.setSubProblemTolerance()
1681
1682        def getTolerance(self):        def getTolerance(self):
1683           """           """
1708           """           """
1709           return self.__atol           return self.__atol
1710
1711        def setSubProblemTolerance(self,rtol=None):        def getSubProblemTolerance(self):
1712           """           """
1713           Sets the relative tolerance to solve the subproblem(s).           Sets the relative tolerance to solve the subproblem(s).
1714
1715           @param rtol: relative tolerence           @param rtol: relative tolerence
1716           @type rtol: positive C{float}           @type rtol: positive C{float}
1717           """           """
1718           if rtol == None:           return max(200.*util.EPSILON,self.getTolerance()**2)
rtol=max(200.*util.EPSILON,self.getTolerance()**2)
if rtol<=0:
raise ValueError,"tolerance must be positive."
self.__sub_tol=rtol

def getSubProblemTolerance(self):
"""
Returns the subproblem reduction factor.

@return: subproblem reduction factor
@rtype: C{float}
"""
return self.__sub_tol
1719
1721     """     """
1738     pde.setValue(y=d)     pde.setValue(y=d)
1739     return util.whereNonZero(pde.getRightHandSide())     return util.whereNonZero(pde.getRightHandSide())
1740
#==============================================================================
"""
This implements a solver for a saddle point problem

M{f(u,p)=0}
M{g(u)=0}

for u and p. The problem is solved with an inexact Uszawa scheme for p:

M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
M{Q_g (p^{k+1}-p^{k}) =   g(u^{k+1})}

where Q_f is an approximation of the Jacobian A_f of f with respect to u and
Q_g is an approximation of A_g A_f^{-1} A_g, where A_g is the Jacobian of g
with respect to p. As the construction of a 'proper' Q_g can be difficult, a
non-linear conjugate gradient method is applied to solve for p, so Q_g plays
in fact the role of a preconditioner.
"""
def __init__(self,verbose=False,*args):
    """
    Initializes the problem.

    @param verbose: if True, some information is printed in the process
    @type verbose: C{bool}
    @note: this method may be overwritten by a particular saddle point
           problem
    """
    # local import keeps this fix self-contained in the method
    import warnings
    print("SaddlePointProblem should not be used anymore!")
    if not isinstance(verbose,bool):
        raise TypeError("verbose needs to be of type bool.")
    self.__verbose=verbose
    self.relaxation=1.
    # Bug fix: the original code merely constructed a DeprecationWarning
    # instance and discarded it (a no-op). Actually emit the warning so
    # callers are notified through the warnings machinery.
    warnings.warn("SaddlePointProblem should not be used anymore.", DeprecationWarning)

def trace(self,text):
    """
    Prints C{text} when verbose output has been switched on.

    @param text: a text message
    @type text: C{str}
    """
    if not self.__verbose:
        return
    print("%s: %s"%(str(self),text))

def solve_f(self,u,p,tol=1.e-8):
    """
    Computes the increment du by solving

       A_f du = f(u,p)

    to tolerance C{tol}, where A_f is the Jacobian of f with respect
    to u.

    @param u: current approximation of u
    @type u: L{escript.Data}
    @param p: current approximation of p
    @type p: L{escript.Data}
    @param tol: tolerance expected for du
    @type tol: C{float}
    @return: increment du
    @rtype: L{escript.Data}
    @note: this method has to be overwritten by a particular saddle point
           problem
    """
    # abstract stub: concrete saddle point problems must override this
    return None

def solve_g(self,u,tol=1.e-8):
    """
    Computes the increment dp from

       Q_g dp = g(u)

    where Q_g is a preconditioner for A_g A_f^{-1} A_g and A_g is the
    Jacobian of g with respect to p.

    @param u: current approximation of u
    @type u: L{escript.Data}
    @param tol: tolerance expected for dp
    @type tol: C{float}
    @return: increment dp
    @rtype: L{escript.Data}
    @note: this method has to be overwritten by a particular saddle point
           problem
    """
    # abstract stub: concrete saddle point problems must override this
    return None

def inner(self,p0,p1):
    """
    Returns the inner product of C{p0} and C{p1} approximating p.
    Typically this is implemented as C{integrate(p0*p1)}.

    @return: inner product of p0 and p1
    @rtype: C{float}
    """
    # abstract stub: to be supplied by a concrete problem
    return None

subiter_max=3  # maximum number of backtracking sub-steps per solve() iteration
def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):
    """
    Runs the solver.

    Each iteration updates u via C{solve_f} (u_new = u - du) and p via
    C{solve_g}; optionally a backtracking procedure on the relaxation
    factor is applied when the norm of g is not reduced sufficiently.

    @param u0: initial guess for C{u}
    @type u0: L{esys.escript.Data}
    @param p0: initial guess for C{p}
    @type p0: L{esys.escript.Data}
    @param tolerance: tolerance for relative error in C{u} and C{p}
    @type tolerance: positive C{float}
    @param tolerance_u: tolerance for relative error in C{u} if different
                        from C{tolerance}
    @type tolerance_u: positive C{float}
    @param iter_max: maximum number of iteration steps
    @type iter_max: C{int}
    @param accepted_reduction: factor by which the norm of g must shrink
                               in each step; if it does not, backtracking
                               on the relaxation factor is started. If
                               C{accepted_reduction=None} no backtracking
                               is used.
    @type accepted_reduction: positive C{float} or C{None}
    @param relaxation: initial relaxation factor. If C{relaxation==None},
                       the last relaxation factor is used.
    @type relaxation: C{float} or C{None}
    @return: approximations of u and p
    @rtype: C{tuple}
    @raise ArithmeticError: if there is no convergence after C{iter_max}
                            steps
    """
    tol=1.e-2  # fixed relative tolerance handed to the subproblem solvers solve_f/solve_g
    if tolerance_u==None: tolerance_u=tolerance
    if not relaxation==None: self.relaxation=relaxation
    # angle_limit controls when backtracking is abandoned (see the test on
    # 'rad' below); it is zero when backtracking is disabled or useless.
    if accepted_reduction ==None:
        angle_limit=0.
    elif accepted_reduction>=1.:
        angle_limit=0.
    else:
        angle_limit=util.sqrt(1-accepted_reduction**2)
    self.iter=0
    u=u0
    p=p0
    #
    #   initialize things:
    #
    converged=False
    #
    #  start loop:
    #
    #  initial search direction is g
    #
    while not converged :
        if self.iter>iter_max:
            raise ArithmeticError("no convergence after %s steps."%self.iter)
        # u update: du = A_f^{-1} f(u,p), then u_new = u - du
        f_new=self.solve_f(u,p,tol)
        norm_f_new = util.Lsup(f_new)
        u_new=u-f_new
        # p search direction: dp = Q_g^{-1} g(u_new)
        g_new=self.solve_g(u_new,tol)
        self.iter+=1
        norm_g_new = util.sqrt(self.inner(g_new,g_new))
        if norm_f_new==0. and norm_g_new==0.: return u, p
        if self.iter>1 and not accepted_reduction==None:
            #
            #   did we manage to reduce the norm of G? I
            #   if not we start a backtracking procedure
            #
            # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g
            if norm_g_new > accepted_reduction * norm_g:
                sub_iter=0
                s=self.relaxation
                d=g        # backtracking search direction: g from the previous step
                g_last=g
                self.trace("    start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))
                while sub_iter < self.subiter_max and  norm_g_new > accepted_reduction * norm_g:
                    dg= g_new-g_last
                    norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)
                    # NOTE(review): 'rad' is never assigned anywhere in this
                    # method, so this line raises NameError as soon as
                    # backtracking is entered. Presumably it was meant to be an
                    # inner product of g_new and dg (an angle test against
                    # angle_limit) -- confirm against the revision history
                    # before relying on backtracking.
                    if abs(rad) < norm_dg*norm_g_new * angle_limit:
                        if sub_iter>0: self.trace("    no further improvements expected from backtracking.")
                        break
                    r=self.relaxation  # NOTE(review): only used by the commented-out prediction below
                    s+=self.relaxation
                    #####
                    # a=g_new+self.relaxation*dg/r
                    # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation
                    #####
                    g_last=g_new
                    p+=self.relaxation*d
                    f_new=self.solve_f(u,p,tol)
                    u_new=u-f_new
                    g_new=self.solve_g(u_new,tol)
                    self.iter+=1
                    norm_f_new = util.Lsup(f_new)
                    norm_g_new = util.sqrt(self.inner(g_new,g_new))
                    # print "   ",sub_iter," new g norm",norm_g_new
                    self.trace("    %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))
                    #
                    #   can we expect reduction of g?
                    #
                    # u_last=u_new
                    sub_iter+=1
                self.relaxation=s  # keep the accumulated relaxation for the next step
        #
        #  check for convergence:
        #
        norm_u_new = util.Lsup(u_new)
        p_new=p+self.relaxation*g_new
        norm_p_new = util.sqrt(self.inner(p_new,p_new))
        self.trace("%s th step: f = %s, f/u = %s, g = %s, g/p = %s, relaxation = %s."%(self.iter, norm_f_new ,norm_f_new/norm_u_new, norm_g_new, norm_g_new/norm_p_new, self.relaxation))

        if self.iter>1:
            # relative stopping criterion based on the change of f and g
            # between two consecutive iterates
            dg2=g_new-g
            df2=f_new-f
            norm_dg2=util.sqrt(self.inner(dg2,dg2))
            norm_df2=util.Lsup(df2)
            # print norm_g_new, norm_g, norm_dg, norm_p, tolerance
            tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new
            tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new
            if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:
                converged=True
        # roll the state forward for the next iteration
        f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new
    self.trace("convergence after %s steps."%self.iter)
    return u,p
1741

Legend:
 Removed from v.2473 changed lines Added in v.2474