# $Id$

"""
Provides some tools related to PDEs.

Currently includes:
    - Projector - to project a discontinuous function onto a continuous function
    - Locator - to trace values in data objects at a certain location
    - TimeIntegrationManager - to handle extrapolation in time
    - SaddlePointProblem - solver for saddle point problems using the inexact Uzawa scheme

@var __author__: name of author
@var __copyright__: copyrights
@var __license__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__copyright__="""  Copyright (c) 2006 by ACcESS MNRF
                   http://www.access.edu.au
               Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
               http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.iservo.edu.au/esys"
__version__="$Revision$"
__date__="$Date$"


import escript
import linearPDEs
import numarray
import util

class TimeIntegrationManager:
    """
    A simple mechanism to manage time dependent values.

    Typical usage is::

        dt=0.1 # time increment
        tm=TimeIntegrationManager(inital_value,p=1)
        while t<1.:
            v_guess=tm.extrapolate(dt) # extrapolate to t+dt
            v=...
            tm.checkin(dt,v)
            t+=dt

    @note: currently only p=1 is supported.
    """
    def __init__(self,*inital_values,**kwargs):
        """
        Sets up the value manager, where inital_values are the initial values
        and the keyword argument p is the order used for extrapolation.
        """
        if kwargs.has_key("p"):
            self.__p=kwargs["p"]
        else:
            self.__p=1
        if kwargs.has_key("time"):
            self.__t=kwargs["time"]
        else:
            self.__t=0.
        self.__v_mem=[inital_values]
        self.__order=0
        self.__dt_mem=[]
        self.__num_val=len(inital_values)

    def getTime(self):
        return self.__t

    def getValue(self):
        out=self.__v_mem[0]
        if len(out)==1:
            return out[0]
        else:
            return out

    def checkin(self,dt,*values):
        """
        Adds new values to the manager. Values older than p steps back are dropped.
        """
        o=min(self.__order+1,self.__p)
        v_mem_new=[values]
        dt_mem_new=[dt]
        for i in range(o-1):
            v_mem_new.append(self.__v_mem[i])
            dt_mem_new.append(self.__dt_mem[i])
        v_mem_new.append(self.__v_mem[o-1])
        self.__order=o
        self.__v_mem=v_mem_new
        self.__dt_mem=dt_mem_new
        self.__t+=dt

    def extrapolate(self,dt):
        """
        Extrapolates to dt forward in time.
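
        For C{p=1} (the currently supported case) this is the linear extrapolation
        M{v(t+dt) = (1+dt/dt_last)*v(t) - (dt/dt_last)*v(t-dt_last)},
        where C{dt_last} is the time increment of the most recent L{checkin};
        this restates the formula used in the code below.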
        """
        if self.__order==0:
            out=self.__v_mem[0]
        else:
            out=[]
            for i in range(self.__num_val):
                out.append((1.+dt/self.__dt_mem[0])*self.__v_mem[0][i]-dt/self.__dt_mem[0]*self.__v_mem[1][i])

        if len(out)==0:
            return None
        elif len(out)==1:
            return out[0]
        else:
            return out


class Projector:
    """
    The Projector is a factory which projects a discontinuous function onto a
    continuous function on a given domain.
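
    A minimal usage sketch (C{u} stands for any L{escript.Data} object defined on
    C{domain}; both names are placeholders, not part of this module)::

        p=Projector(domain)
        u_continuous=p(u)   # projection of u onto a continuous function space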
    """
    def __init__(self, domain, reduce=True, fast=True):
        """
        Create a continuous function space projector for a domain.

        @param domain: Domain of the projection.
        @param reduce: Flag to reduce projection order (default is True)
        @param fast: Flag to use a fast method based on matrix lumping (default is True)
        """
        self.__pde = linearPDEs.LinearPDE(domain)
        if fast:
            self.__pde.setSolverMethod(linearPDEs.LinearPDE.LUMPING)
        self.__pde.setSymmetryOn()
        self.__pde.setReducedOrderTo(reduce)
        self.__pde.setValue(D = 1.)
        return

    def __del__(self):
        return

    def __call__(self, input_data):
        """
        Projects input_data onto a continuous function.

        @param input_data: The input_data to be projected.
        """
        out=escript.Data(0.,input_data.getShape(),self.__pde.getFunctionSpaceForSolution())
        if input_data.getRank()==0:
            self.__pde.setValue(Y = input_data)
            out=self.__pde.getSolution()
        elif input_data.getRank()==1:
            for i0 in range(input_data.getShape()[0]):
                self.__pde.setValue(Y = input_data[i0])
                out[i0]=self.__pde.getSolution()
        elif input_data.getRank()==2:
            for i0 in range(input_data.getShape()[0]):
                for i1 in range(input_data.getShape()[1]):
                    self.__pde.setValue(Y = input_data[i0,i1])
                    out[i0,i1]=self.__pde.getSolution()
        elif input_data.getRank()==3:
            for i0 in range(input_data.getShape()[0]):
                for i1 in range(input_data.getShape()[1]):
                    for i2 in range(input_data.getShape()[2]):
                        self.__pde.setValue(Y = input_data[i0,i1,i2])
                        out[i0,i1,i2]=self.__pde.getSolution()
        else:
            for i0 in range(input_data.getShape()[0]):
                for i1 in range(input_data.getShape()[1]):
                    for i2 in range(input_data.getShape()[2]):
                        for i3 in range(input_data.getShape()[3]):
                            self.__pde.setValue(Y = input_data[i0,i1,i2,i3])
                            out[i0,i1,i2,i3]=self.__pde.getSolution()
        return out
|
class NoPDE:
    """
    Solves the following problem for u:

    M{kronecker[i,j]*D[j]*u[j]=Y[i]}

    with the constraint

    M{u[j]=r[j]} where M{q[j]>0}

    where D, Y, r and q are given functions of rank 1.

    In the case of scalars this takes the form

    M{D*u=Y}

    with the constraint

    M{u=r} where M{q>0}

    where D, Y, r and q are given scalar functions.

    The constraint overwrites any other condition.

    @note: This class is similar to the L{linearPDEs.LinearPDE} class with A=B=C=X=0 but has the intention
           that all input parameters are given in L{Solution} or L{ReducedSolution}. The whole
           thing is a bit strange and I blame Robert.Woodcock@csiro.au for this.
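
    A minimal usage sketch (C{mydomain} stands for an existing L{Domain}; the scalar
    case with constant coefficients is used for illustration)::

        p=NoPDE(mydomain)
        p.setValue(D=2.,Y=1.)
        u=p.getSolution()   # u solves D*u=Y, i.e. u=0.5 everywhere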
    """
    def __init__(self,domain,D=None,Y=None,q=None,r=None):
        """
        Initializes the problem.

        @param domain: domain of the PDE.
        @type domain: L{Domain}
        @param D: coefficient of the solution.
        @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param Y: right hand side
        @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param q: location of constraints
        @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param r: value of solution at locations of constraints
        @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        """
        self.__domain=domain
        self.__D=D
        self.__Y=Y
        self.__q=q
        self.__r=r
        self.__u=None
        self.__function_space=escript.Solution(self.__domain)

    def setReducedOn(self):
        """
        Sets the L{FunctionSpace} of the solution to L{ReducedSolution}.
        """
        self.__function_space=escript.ReducedSolution(self.__domain)
        self.__u=None

    def setReducedOff(self):
        """
        Sets the L{FunctionSpace} of the solution to L{Solution}.
        """
        self.__function_space=escript.Solution(self.__domain)
        self.__u=None

    def setValue(self,D=None,Y=None,q=None,r=None):
        """
        Assigns values to the parameters.

        @param D: coefficient of the solution.
        @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param Y: right hand side
        @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param q: location of constraints
        @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param r: value of solution at locations of constraints
        @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        """
        if not D==None:
            self.__D=D
            self.__u=None
        if not Y==None:
            self.__Y=Y
            self.__u=None
        if not q==None:
            self.__q=q
            self.__u=None
        if not r==None:
            self.__r=r
            self.__u=None

    def getSolution(self):
        """
        Returns the solution.

        @return: the solution of the problem
        @rtype: L{Data} object in the L{FunctionSpace} L{Solution} or L{ReducedSolution}.
        """
        if self.__u==None:
            if self.__D==None:
                raise ValueError,"coefficient D is undefined"
            D=escript.Data(self.__D,self.__function_space)
            if D.getRank()>1:
                raise ValueError,"coefficient D must have rank 0 or 1"
            if self.__Y==None:
                self.__u=escript.Data(0.,D.getShape(),self.__function_space)
            else:
                self.__u=util.quotient(self.__Y,D)
            if not self.__q==None:
                # the constraint overwrites the solution where q>0:
                q=util.wherePositive(escript.Data(self.__q,self.__function_space))
                self.__u*=(1.-q)
                if not self.__r==None: self.__u+=q*self.__r
        return self.__u
|
class Locator:
    """
    Locator provides access to the values of data objects at a given
    spatial coordinate x.

    In fact, a Locator object finds the sample in the set of samples of a
    given function space or domain which is closest to the given
    point x.
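
    A minimal usage sketch (C{mydomain} stands for an existing L{Domain} and C{u}
    for a L{Data} object defined on it; both names are placeholders)::

        l=Locator(mydomain,numarray.array([1.,0.,0.]))
        u_at_point=l(u)   # value of u at the sample closest to (1,0,0)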
    """

    def __init__(self,where,x=numarray.zeros((3,))):
        """
        Initializes a Locator to access values in Data objects on the Domain
        or FunctionSpace C{where} for the sample point which is closest to the
        given point x.

        @param where: function space
        @type where: L{escript.FunctionSpace}
        @param x: point (or list of points) at which values will be accessed
        @type x: L{numarray.NumArray} or C{list} of L{numarray.NumArray}
        """
        if isinstance(where,escript.FunctionSpace):
            self.__function_space=where
        else:
            self.__function_space=escript.ContinuousFunction(where)
        if isinstance(x, list):
            self.__id=[]
            for p in x:
                self.__id.append(util.length(self.__function_space.getX()-p[:self.__function_space.getDim()]).minGlobalDataPoint())
        else:
            self.__id=util.length(self.__function_space.getX()-x[:self.__function_space.getDim()]).minGlobalDataPoint()

    def __str__(self):
        """
        Returns the coordinates of the Locator as a string.
        """
        x=self.getX()
        if isinstance(x,list):
            out="["
            first=True
            for xx in x:
                if not first:
                    out+=","
                else:
                    first=False
                out+=str(xx)
            out+="]"
        else:
            out=str(x)
        return out

    def getX(self):
        """
        Returns the exact coordinates of the Locator.
        """
        return self(self.getFunctionSpace().getX())

    def getFunctionSpace(self):
        """
        Returns the function space of the Locator.
        """
        return self.__function_space

    def getId(self,item=None):
        """
        Returns the identifier of the location.
        """
        if item == None:
            return self.__id
        else:
            if isinstance(self.__id,list):
                return self.__id[item]
            else:
                return self.__id

    def __call__(self,data):
        """
        Returns the value of data at the Locator if data is a Data object,
        otherwise the object is returned.
        """
        return self.getValue(data)

    def getValue(self,data):
        """
        Returns the value of data at the Locator if data is a Data object,
        otherwise the object is returned.
        """
        if isinstance(data,escript.Data):
            if data.getFunctionSpace()==self.getFunctionSpace():
                dat=data
            else:
                dat=data.interpolate(self.getFunctionSpace())
            id=self.getId()
            if isinstance(id,list):
                out=[]
                for i in id:
                    o=dat.getValueOfGlobalDataPoint(*i)
                    if dat.getRank()==0:
                        out.append(o[0])
                    else:
                        out.append(o)
                return out
            else:
                out=dat.getValueOfGlobalDataPoint(*id)
                if dat.getRank()==0:
                    return out[0]
                else:
                    return out
        else:
            return data
|
class SaddlePointProblem(object):
    """
    This implements a solver for a saddle point problem

    M{f(u,p)=0}
    M{g(u)=0}

    for u and p. The problem is solved with an inexact Uzawa scheme for p:

    M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
    M{Q_g (p^{k+1}-p^{k}) = g(u^{k+1})}

    where Q_f is an approximation of the Jacobian A_f of f with respect to u and Q_g is an
    approximation of M{A_g A_f^{-1} A_g}, where A_g is the Jacobian of g. As the construction
    of a 'proper' Q_g can be difficult, a non-linear conjugate gradient method is applied to
    solve for p, so Q_g in fact plays the role of a preconditioner.
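
    A minimal usage sketch (the subclass below is made up for illustration; a concrete
    problem has to provide its own C{solve_f}, C{solve_g} and C{inner})::

        class MyProblem(SaddlePointProblem):
            def solve_f(self,u,p,tol=1.e-8):
                ...                            # return du with A_f*du=f(u,p)
            def solve_g(self,u,tol=1.e-8):
                ...                            # return dp with Q_g*dp=g(u)
            def inner(self,p0,p1):
                return util.integrate(p0*p1)

        sp=MyProblem(verbose=True)
        u,p=sp.solve(u0,p0,tolerance=1.e-6)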
    """
    def __init__(self,verbose=False,*args):
        """
        Initializes the problem.

        @param verbose: switches on the printing of some information
        @type verbose: C{bool}
        @note: this method may be overwritten by a particular saddle point problem
        """
        if verbose:
            self.__verbose=True
        else:
            self.__verbose=False
        self.relaxation=1.

    def trace(self,text):
        """
        Prints text if verbose has been set.

        @param text: a text message
        @type text: C{str}
        """
        if self.__verbose: print "%s: %s"%(str(self),text)
|
    def solve_f(self,u,p,tol=1.e-8):
        """
        Solves

        A_f du = f(u,p)

        with tolerance C{tol} and returns du. A_f is the Jacobian of f with respect to u.

        @param u: current approximation of u
        @type u: L{escript.Data}
        @param p: current approximation of p
        @type p: L{escript.Data}
        @param tol: tolerance expected for du
        @type tol: C{float}
        @return: increment du
        @rtype: L{escript.Data}
        @note: this method has to be overwritten by a particular saddle point problem
        """
        pass

    def solve_g(self,u,tol=1.e-8):
        """
        Solves

        Q_g dp = g(u)

        where Q_g is a preconditioner for M{A_g A_f^{-1} A_g} and A_g is the Jacobian of g.

        @param u: current approximation of u
        @type u: L{escript.Data}
        @param tol: tolerance expected for dp
        @type tol: C{float}
        @return: increment dp
        @rtype: L{escript.Data}
        @note: this method has to be overwritten by a particular saddle point problem
        """
        pass

    def inner(self,p0,p1):
        """
        Inner product of p0 and p1 approximating p. Typically this returns C{integrate(p0*p1)}.

        @return: inner product of p0 and p1
        @rtype: C{float}
        """
        pass
|
    subiter_max=3
    def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):
        """
        Runs the solver.

        @param u0: initial guess for C{u}
        @type u0: L{esys.escript.Data}
        @param p0: initial guess for C{p}
        @type p0: L{esys.escript.Data}
        @param tolerance: tolerance for relative error in C{u} and C{p}
        @type tolerance: positive C{float}
        @param tolerance_u: tolerance for relative error in C{u} if different from C{tolerance}
        @type tolerance_u: positive C{float}
        @param iter_max: maximum number of iteration steps.
        @type iter_max: C{int}
        @param accepted_reduction: if the norm of g cannot be reduced by the factor C{accepted_reduction},
                                   backtracking is used to adapt the relaxation factor. If
                                   C{accepted_reduction=None} no backtracking is used.
        @type accepted_reduction: positive C{float} or C{None}
        @param relaxation: initial relaxation factor. If C{relaxation==None}, the last relaxation factor is used.
        @type relaxation: C{float} or C{None}
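
        A call sketch (C{sp} stands for an instance of a concrete L{SaddlePointProblem}
        subclass, C{u0} and C{p0} for suitable initial guesses; see the class docstring)::

            u,p=sp.solve(u0,p0,tolerance=1.e-4,iter_max=50)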
        """
        tol=1.e-2
        if tolerance_u==None: tolerance_u=tolerance
        if not relaxation==None: self.relaxation=relaxation
        if accepted_reduction ==None:
            angle_limit=0.
        elif accepted_reduction>=1.:
            angle_limit=0.
        else:
            angle_limit=util.sqrt(1-accepted_reduction**2)
        self.iter=0
        u=u0
        p=p0
        #
        #  initialize things:
        #
        converged=False
        #
        #  start loop:
        #
        #  initial search direction is g
        #
        while not converged :
            if self.iter>iter_max:
                raise ArithmeticError("no convergence after %s steps."%self.iter)
            f_new=self.solve_f(u,p,tol)
            norm_f_new = util.Lsup(f_new)
            u_new=u-f_new
            g_new=self.solve_g(u_new,tol)
            self.iter+=1
            norm_g_new = util.sqrt(self.inner(g_new,g_new))
            if norm_f_new==0. and norm_g_new==0.: return u, p
            if self.iter>1 and not accepted_reduction==None:
                #
                #   did we manage to reduce the norm of g?
                #   if not we start a backtracking procedure
                #
                # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g
                if norm_g_new > accepted_reduction * norm_g:
                    sub_iter=0
                    s=self.relaxation
                    d=g
                    g_last=g
                    self.trace("   start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))
                    while sub_iter < self.subiter_max and norm_g_new > accepted_reduction * norm_g:
                        dg= g_new-g_last
                        norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)
                        rad=self.inner(g_new,dg)/self.relaxation
                        # print "   ",sub_iter,": rad, norm_dg:",abs(rad), norm_dg*norm_g_new * angle_limit
                        # print "   ",sub_iter,": rad, norm_dg:",rad, norm_dg, norm_g_new, norm_g
                        if abs(rad) < norm_dg*norm_g_new * angle_limit:
                            if sub_iter>0: self.trace("   no further improvements expected from backtracking.")
                            break
                        r=self.relaxation
                        self.relaxation= - rad/norm_dg**2
                        s+=self.relaxation
                        #####
                        # a=g_new+self.relaxation*dg/r
                        # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation
                        #####
                        g_last=g_new
                        p+=self.relaxation*d
                        f_new=self.solve_f(u,p,tol)
                        u_new=u-f_new
                        g_new=self.solve_g(u_new,tol)
                        self.iter+=1
                        norm_f_new = util.Lsup(f_new)
                        norm_g_new = util.sqrt(self.inner(g_new,g_new))
                        # print "   ",sub_iter," new g norm",norm_g_new
                        self.trace("   %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))
                        #
                        #   can we expect reduction of g?
                        #
                        # u_last=u_new
                        sub_iter+=1
                    self.relaxation=s
            #
            #  check for convergence:
            #
            norm_u_new = util.Lsup(u_new)
            p_new=p+self.relaxation*g_new
            norm_p_new = util.sqrt(self.inner(p_new,p_new))
            self.trace("%s th step: f/u = %s, g/p = %s, relaxation = %s."%(self.iter,norm_f_new/norm_u_new, norm_g_new/norm_p_new, self.relaxation))

            if self.iter>1:
                dg2=g_new-g
                df2=f_new-f
                norm_dg2=util.sqrt(self.inner(dg2,dg2))
                norm_df2=util.Lsup(df2)
                # print norm_g_new, norm_g, norm_dg, norm_p, tolerance
                tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new
                tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new
                if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:
                    converged=True
            f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new
        self.trace("convergence after %s steps."%self.iter)
        return u,p
#    def solve(self,u0,p0,tolerance=1.e-6,iter_max=10,self.relaxation=1.):
#       tol=1.e-2
#       iter=0
#       converged=False
#       u=u0*1.
#       p=p0*1.
#       while not converged and iter<iter_max:
#           du=self.solve_f(u,p,tol)
#           u-=du
#           norm_du=util.Lsup(du)
#           norm_u=util.Lsup(u)
#
#           dp=self.relaxation*self.solve_g(u,tol)
#           p+=dp
#           norm_dp=util.sqrt(self.inner(dp,dp))
#           norm_p=util.sqrt(self.inner(p,p))
#           print iter,"-th step rel. errror u,p= (%s,%s) (%s,%s)(%s,%s)"%(norm_du,norm_dp,norm_du/norm_u,norm_dp/norm_p,norm_u,norm_p)
#           iter+=1
#
#           converged = (norm_du <= tolerance*norm_u) and (norm_dp <= tolerance*norm_p)
#           if converged:
#              print "convergence after %s steps."%iter
#           else:
#              raise ArithmeticError("no convergence after %s steps."%iter)
#
#       return u,p

# vim: expandtab shiftwidth=4: