# $Id$

"""
Provides some tools related to PDEs.

Currently includes:
    - Projector - to project a discontinuous function onto a continuous function
    - Locator - to trace values in data objects at a certain location
    - TimeIntegrationManager - to handle extrapolation in time
    - SaddlePointProblem - solver for saddle point problems using the inexact Uzawa scheme

@var __author__: name of author
@var __copyright__: copyrights
@var __license__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__copyright__="""  Copyright (c) 2006 by ACcESS MNRF
                   http://www.access.edu.au
                Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
               http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.iservo.edu.au/esys"
__version__="$Revision$"
__date__="$Date$"


import escript
import linearPDEs
import numarray
import util

class TimeIntegrationManager:
    """
    a simple mechanism to manage time dependent values.

    typical usage is::

        dt=0.1 # time increment
        tm=TimeIntegrationManager(inital_value,p=1)
        while t<1.:
            v_guess=tm.extrapolate(dt) # extrapolate to t+dt
            v=...
            tm.checkin(dt,v)
            t+=dt

    @note: currently only p=1 is supported.
    """
    def __init__(self,*inital_values,**kwargs):
        """
        sets up the value manager where inital_values are the initial values and p is the order used for extrapolation
        """
        if "p" in kwargs:
            self.__p=kwargs["p"]
        else:
            self.__p=1
        if "time" in kwargs:
            self.__t=kwargs["time"]
        else:
            self.__t=0.
        self.__v_mem=[inital_values]
        self.__order=0
        self.__dt_mem=[]
        self.__num_val=len(inital_values)

    def getTime(self):
        return self.__t

    def getValue(self):
        out=self.__v_mem[0]
        if len(out)==1:
            return out[0]
        else:
            return out

    def checkin(self,dt,*values):
        """
        adds new values to the manager. Values older than the extrapolation order p are dropped.
        """
        o=min(self.__order+1,self.__p)
        v_mem_new=[values]
        dt_mem_new=[dt]
        for i in range(o-1):
            v_mem_new.append(self.__v_mem[i])
            dt_mem_new.append(self.__dt_mem[i])
        v_mem_new.append(self.__v_mem[o-1])
        self.__order=o
        self.__v_mem=v_mem_new
        self.__dt_mem=dt_mem_new
        self.__t+=dt

    def extrapolate(self,dt):
        """
        extrapolates the managed values dt forward in time.
        """
        if self.__order==0:
            out=self.__v_mem[0]
        else:
            out=[]
            for i in range(self.__num_val):
                out.append((1.+dt/self.__dt_mem[0])*self.__v_mem[0][i]-dt/self.__dt_mem[0]*self.__v_mem[1][i])

        if len(out)==0:
            return None
        elif len(out)==1:
            return out[0]
        else:
            return out

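# A minimal usage sketch of TimeIntegrationManager, using plain floats for
# illustration (any values supporting scalar arithmetic, such as escript Data
# objects, work the same way). The numbers simply follow the linear
# extrapolation formula used in extrapolate():
#
#   tm=TimeIntegrationManager(1.0,p=1)   # value 1.0 at t=0
#   tm.checkin(0.1,1.5)                  # value 1.5 at t=0.1
#   v_guess=tm.extrapolate(0.1)          # linear extrapolation to t=0.2 gives 2.0
#
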
class Projector:
    """
    The Projector is a factory which projects a discontinuous function onto a
    continuous function on a given domain.
    """
    def __init__(self, domain, reduce = True, fast=True):
        """
        Create a continuous function space projector for a domain.

        @param domain: Domain of the projection.
        @param reduce: Flag to reduce projection order (default is True)
        @param fast: Flag to use a fast method based on matrix lumping (default is True)
        """
        self.__pde = linearPDEs.LinearPDE(domain)
        if fast:
            self.__pde.setSolverMethod(linearPDEs.LinearPDE.LUMPING)
        self.__pde.setSymmetryOn()
        self.__pde.setReducedOrderTo(reduce)
        self.__pde.setValue(D = 1.)
        return

    def __del__(self):
        return

    def __call__(self, input_data):
        """
        Projects input_data onto a continuous function.

        @param input_data: The input_data to be projected.
        """
        out=escript.Data(0.,input_data.getShape(),self.__pde.getFunctionSpaceForSolution())
        if input_data.getRank()==0:
            self.__pde.setValue(Y = input_data)
            out=self.__pde.getSolution()
        elif input_data.getRank()==1:
            for i0 in range(input_data.getShape()[0]):
                self.__pde.setValue(Y = input_data[i0])
                out[i0]=self.__pde.getSolution()
        elif input_data.getRank()==2:
            for i0 in range(input_data.getShape()[0]):
                for i1 in range(input_data.getShape()[1]):
                    self.__pde.setValue(Y = input_data[i0,i1])
                    out[i0,i1]=self.__pde.getSolution()
        elif input_data.getRank()==3:
            for i0 in range(input_data.getShape()[0]):
                for i1 in range(input_data.getShape()[1]):
                    for i2 in range(input_data.getShape()[2]):
                        self.__pde.setValue(Y = input_data[i0,i1,i2])
                        out[i0,i1,i2]=self.__pde.getSolution()
        else:
            for i0 in range(input_data.getShape()[0]):
                for i1 in range(input_data.getShape()[1]):
                    for i2 in range(input_data.getShape()[2]):
                        for i3 in range(input_data.getShape()[3]):
                            self.__pde.setValue(Y = input_data[i0,i1,i2,i3])
                            out[i0,i1,i2,i3]=self.__pde.getSolution()
        return out

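# A minimal usage sketch for Projector. The finley Rectangle domain and
# util.grad() below come from outside this module and are named here for
# illustration only: grad(u) lives on a discontinuous function space, and the
# projector maps it onto a continuous one.
#
#   from esys.finley import Rectangle
#   dom=Rectangle(10,10)
#   proj=Projector(dom)
#   u=...                      # some Data object on ContinuousFunction(dom)
#   g_cont=proj(util.grad(u))  # continuous representation of grad(u)
#
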
class NoPDE:
    """
    solves the following problem for u:

    M{kronecker[i,j]*D[j]*u[j]=Y[i]}

    with the constraint

    M{u[j]=r[j]} where M{q[j]>0}

    where D, Y, r and q are given functions of rank 1.

    In the case of scalars this takes the form

    M{D*u=Y}

    with the constraint

    M{u=r} where M{q>0}

    where D, Y, r and q are given scalar functions.

    The constraint overwrites any other condition.

    @note: This class is similar to the L{linearPDEs.LinearPDE} class with A=B=C=X=0 but has the intention
           that all input parameters are given in L{Solution} or L{ReducedSolution}. The whole
           thing is a bit strange and I blame Robert.Woodcock@csiro.au for this.
    """
    def __init__(self,domain,D=None,Y=None,q=None,r=None):
        """
        initializes the problem

        @param domain: domain of the PDE.
        @type domain: L{Domain}
        @param D: coefficient of the solution.
        @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param Y: right hand side
        @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param q: location of constraints
        @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param r: value of solution at locations of constraints
        @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        """
        self.__domain=domain
        self.__D=D
        self.__Y=Y
        self.__q=q
        self.__r=r
        self.__u=None
        self.__function_space=escript.Solution(self.__domain)

    def setReducedOn(self):
        """
        sets the L{FunctionSpace} of the solution to L{ReducedSolution}
        """
        self.__function_space=escript.ReducedSolution(self.__domain)
        self.__u=None

    def setReducedOff(self):
        """
        sets the L{FunctionSpace} of the solution to L{Solution}
        """
        self.__function_space=escript.Solution(self.__domain)
        self.__u=None

    def setValue(self,D=None,Y=None,q=None,r=None):
        """
        assigns values to the parameters.

        @param D: coefficient of the solution.
        @type D: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param Y: right hand side
        @type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param q: location of constraints
        @type q: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        @param r: value of solution at locations of constraints
        @type r: C{float}, C{int}, L{numarray.NumArray}, L{Data}
        """
        if not D==None:
            self.__D=D
            self.__u=None
        if not Y==None:
            self.__Y=Y
            self.__u=None
        if not q==None:
            self.__q=q
            self.__u=None
        if not r==None:
            self.__r=r
            self.__u=None

    def getSolution(self):
        """
        returns the solution

        @return: the solution of the problem
        @rtype: L{Data} object in the L{FunctionSpace} L{Solution} or L{ReducedSolution}.
        """
        if self.__u==None:
            if self.__D==None:
                raise ValueError("coefficient D is undefined")
            D=escript.Data(self.__D,self.__function_space)
            if D.getRank()>1:
                raise ValueError("coefficient D must have rank 0 or 1")
            if self.__Y==None:
                self.__u=escript.Data(0.,D.getShape(),self.__function_space)
            else:
                self.__u=util.quotient(self.__Y,D)
            if not self.__q==None:
                q=util.wherePositive(escript.Data(self.__q,self.__function_space))
                self.__u*=(1.-q)
                if not self.__r==None: self.__u+=q*self.__r
        return self.__u

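# A minimal usage sketch for NoPDE, assuming a domain "dom" and objects d, y,
# mask and value already defined on Solution(dom) (the names are illustrative
# only): the solution is y/d everywhere except where mask>0, where it is set
# to value.
#
#   nopde=NoPDE(dom,D=d,Y=y,q=mask,r=value)
#   u=nopde.getSolution()
#
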
class Locator:
    """
    Locator provides access to the values of data objects at a given
    spatial coordinate x.

    In fact, a Locator object finds the sample in the set of samples of a
    given function space or domain which is closest to the given
    point x.
    """

    def __init__(self,where,x=numarray.zeros((3,))):
        """
        Initializes a Locator to access values in Data objects on the Domain
        or FunctionSpace where for the sample point which is
        closest to the given point x.
        """
        if isinstance(where,escript.FunctionSpace):
            self.__function_space=where
        else:
            self.__function_space=escript.ContinuousFunction(where)
        self.__id=util.length(self.__function_space.getX()-x[:self.__function_space.getDim()]).mindp()

    def __str__(self):
        """
        Returns the coordinates of the Locator as a string.
        """
        return "<Locator %s>"%str(self.getX())

    def getFunctionSpace(self):
        """
        Returns the function space of the Locator.
        """
        return self.__function_space

    def getId(self):
        """
        Returns the identifier of the location.
        """
        return self.__id

    def getX(self):
        """
        Returns the exact coordinates of the Locator.
        """
        return self(self.getFunctionSpace().getX())

    def __call__(self,data):
        """
        Returns the value of data at the Locator if data is a Data object
        otherwise the object is returned.
        """
        return self.getValue(data)

    def getValue(self,data):
        """
        Returns the value of data at the Locator if data is a Data object
        otherwise the object is returned.
        """
        if isinstance(data,escript.Data):
            if data.getFunctionSpace()==self.getFunctionSpace():
                #out=data.convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1])
                out=data.convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1],self.getId()[2])
            else:
                #out=data.interpolate(self.getFunctionSpace()).convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1])
                out=data.interpolate(self.getFunctionSpace()).convertToNumArrayFromDPNo(self.getId()[0],self.getId()[1],self.getId()[2])
            if data.getRank()==0:
                return out[0]
            else:
                return out
        else:
            return data

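# A minimal usage sketch for Locator, assuming a domain "dom" and a Data
# object "u" defined on it (illustrative names only): the Locator picks the
# sample closest to the requested point and returns values there.
#
#   l=Locator(dom,numarray.array([0.5,0.5]))
#   print l                 # coordinates of the sample actually used
#   u_at_point=l(u)         # value of u at that sample
#
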
class SaddlePointProblem(object):
    """
    This implements a solver for a saddle point problem

    M{f(u,p)=0}
    M{g(u)=0}

    for u and p. The problem is solved with an inexact Uzawa scheme for p:

    M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
    M{Q_g (p^{k+1}-p^{k}) = g(u^{k+1})}

    where Q_f is an approximation of the Jacobian A_f of f with respect to u and Q_g is an approximation of
    A_g A_f^{-1} A_g where A_g is the Jacobian of g with respect to p. As the construction of a 'proper'
    Q_g can be difficult, a non-linear conjugate gradient method is applied to solve for p, so Q_g plays
    in fact the role of a preconditioner.
    """
    def __init__(self,verbose=False,*args):
        """
        initializes the problem

        @param verbose: switches on the printing of some information
        @type verbose: C{bool}
        @note: this method may be overwritten by a particular saddle point problem
        """
        self.__verbose=verbose
        self.relaxation=1.

    def trace(self,text):
        """
        prints text if verbose has been set

        @param text: a text message
        @type text: C{str}
        """
        if self.__verbose: print "%s: %s"%(str(self),text)

    def solve_f(self,u,p,tol=1.e-8):
        """
        solves

        A_f du = f(u,p)

        with tolerance C{tol} and returns du. A_f is the Jacobian of f with respect to u.

        @param u: current approximation of u
        @type u: L{escript.Data}
        @param p: current approximation of p
        @type p: L{escript.Data}
        @param tol: tolerance expected for du
        @type tol: C{float}
        @return: increment du
        @rtype: L{escript.Data}
        @note: this method has to be overwritten by a particular saddle point problem
        """
        pass

    def solve_g(self,u,tol=1.e-8):
        """
        solves

        Q_g dp = g(u)

        where Q_g is a preconditioner for A_g A_f^{-1} A_g and A_g is the Jacobian of g with respect to p.

        @param u: current approximation of u
        @type u: L{escript.Data}
        @param tol: tolerance expected for dp
        @type tol: C{float}
        @return: increment dp
        @rtype: L{escript.Data}
        @note: this method has to be overwritten by a particular saddle point problem
        """
        pass

    def inner(self,p0,p1):
        """
        inner product of p0 and p1 approximating p. Typically this returns C{integrate(p0*p1)}.

        @return: inner product of p0 and p1
        @rtype: C{float}
        """
        pass

    subiter_max=3
    def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None):
        """
        runs the solver

        @param u0: initial guess for C{u}
        @type u0: L{esys.escript.Data}
        @param p0: initial guess for C{p}
        @type p0: L{esys.escript.Data}
        @param tolerance: tolerance for relative error in C{u} and C{p}
        @type tolerance: positive C{float}
        @param tolerance_u: tolerance for relative error in C{u} if different from C{tolerance}
        @type tolerance_u: positive C{float}
        @param iter_max: maximum number of iteration steps.
        @type iter_max: C{int}
        @param accepted_reduction: if the norm of g cannot be reduced by the factor C{accepted_reduction}, backtracking is used to adapt the
                                   relaxation factor. If C{accepted_reduction=None} no backtracking is used.
        @type accepted_reduction: positive C{float} or C{None}
        @param relaxation: initial relaxation factor. If C{relaxation==None}, the last relaxation factor is used.
        @type relaxation: C{float} or C{None}
        """
        tol=1.e-2
        if tolerance_u==None: tolerance_u=tolerance
        if not relaxation==None: self.relaxation=relaxation
        if accepted_reduction==None:
            angle_limit=0.
        elif accepted_reduction>=1.:
            angle_limit=0.
        else:
            angle_limit=util.sqrt(1-accepted_reduction**2)
        self.iter=0
        u=u0
        p=p0
        #
        #  initialize things:
        #
        converged=False
        #
        #  start loop:
        #
        #  initial search direction is g
        #
        while not converged:
            if self.iter>iter_max:
                raise ArithmeticError("no convergence after %s steps."%self.iter)
            f_new=self.solve_f(u,p,tol)
            norm_f_new = util.Lsup(f_new)
            u_new=u-f_new
            g_new=self.solve_g(u_new,tol)
            self.iter+=1
            norm_g_new = util.sqrt(self.inner(g_new,g_new))
            if norm_f_new==0. and norm_g_new==0.: return u, p
            if self.iter>1 and not accepted_reduction==None:
                #
                #   did we manage to reduce the norm of g? if not we start a backtracking procedure
                #
                # print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g
                if norm_g_new > accepted_reduction * norm_g:
                    sub_iter=0
                    s=self.relaxation
                    d=g
                    g_last=g
                    self.trace("    start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s))
                    while sub_iter < self.subiter_max and norm_g_new > accepted_reduction * norm_g:
                        dg= g_new-g_last
                        norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation)
                        rad=self.inner(g_new,dg)/self.relaxation
                        # print "   ",sub_iter,": rad, norm_dg:",abs(rad), norm_dg*norm_g_new * angle_limit
                        # print "   ",sub_iter,": rad, norm_dg:",rad, norm_dg, norm_g_new, norm_g
                        if abs(rad) < norm_dg*norm_g_new * angle_limit:
                            if sub_iter>0: self.trace("    no further improvements expected from backtracking.")
                            break
                        r=self.relaxation
                        self.relaxation= - rad/norm_dg**2
                        s+=self.relaxation
                        #####
                        # a=g_new+self.relaxation*dg/r
                        # print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation
                        #####
                        g_last=g_new
                        p+=self.relaxation*d
                        f_new=self.solve_f(u,p,tol)
                        u_new=u-f_new
                        g_new=self.solve_g(u_new,tol)
                        self.iter+=1
                        norm_f_new = util.Lsup(f_new)
                        norm_g_new = util.sqrt(self.inner(g_new,g_new))
                        # print "   ",sub_iter," new g norm",norm_g_new
                        self.trace("    %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s))
                        #
                        #   can we expect a reduction of g?
                        #
                        # u_last=u_new
                        sub_iter+=1
                    self.relaxation=s
            #
            #  check for convergence:
            #
            norm_u_new = util.Lsup(u_new)
            p_new=p+self.relaxation*g_new
            norm_p_new = util.sqrt(self.inner(p_new,p_new))
            self.trace("%s th step: f/u = %s, g/p = %s, relaxation = %s."%(self.iter,norm_f_new/norm_u_new, norm_g_new/norm_p_new, self.relaxation))

            if self.iter>1:
                dg2=g_new-g
                df2=f_new-f
                norm_dg2=util.sqrt(self.inner(dg2,dg2))
                norm_df2=util.Lsup(df2)
                # print norm_g_new, norm_g, norm_dg, norm_p, tolerance
                tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new
                tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new
                if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f:
                    converged=True
                    break
            f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new
        self.trace("convergence after %s steps."%self.iter)
        return u,p
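
    # A minimal subclassing sketch (illustrative only): a concrete saddle
    # point problem provides solve_f, solve_g and inner; everything else is
    # inherited. The class name and the PDE solves referenced below are
    # assumptions, not part of this module.
    #
    #   class MyStokesProblem(SaddlePointProblem):
    #       def solve_f(self,u,p,tol=1.e-8):
    #           ...   # return du with A_f du = f(u,p), e.g. via a LinearPDE
    #       def solve_g(self,u,tol=1.e-8):
    #           ...   # return dp with Q_g dp = g(u)
    #       def inner(self,p0,p1):
    #           return util.integrate(p0*p1)
    #
    #   u,p=MyStokesProblem(verbose=True).solve(u0,p0,tolerance=1.e-4)
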
    # def solve(self,u0,p0,tolerance=1.e-6,iter_max=10,self.relaxation=1.):
    #    tol=1.e-2
    #    iter=0
    #    converged=False
    #    u=u0*1.
    #    p=p0*1.
    #    while not converged and iter<iter_max:
    #        du=self.solve_f(u,p,tol)
    #        u-=du
    #        norm_du=util.Lsup(du)
    #        norm_u=util.Lsup(u)
    #
    #        dp=self.relaxation*self.solve_g(u,tol)
    #        p+=dp
    #        norm_dp=util.sqrt(self.inner(dp,dp))
    #        norm_p=util.sqrt(self.inner(p,p))
    #        print iter,"-th step rel. error u,p= (%s,%s) (%s,%s)(%s,%s)"%(norm_du,norm_dp,norm_du/norm_u,norm_dp/norm_p,norm_u,norm_p)
    #        iter+=1
    #
    #        converged = (norm_du <= tolerance*norm_u) and (norm_dp <= tolerance*norm_p)
    #    if converged:
    #        print "convergence after %s steps."%iter
    #    else:
    #        raise ArithmeticError("no convergence after %s steps."%iter)
    #
    #    return u,p

# vim: expandtab shiftwidth=4: