#
# $Id$
#
#######################################################
#
# Copyright 2003-2007 by ACceSS MNRF
# Copyright 2007 by University of Queensland
#
# http://esscc.uq.edu.au
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
#######################################################
#
|
"""
Provides some tools related to PDEs.

Currently includes:
- Projector - to project a discontinuous function onto a continuous function
- Locator - to trace values in data objects at a certain location
- TimeIntegrationManager - to handle extrapolation in time
- SaddlePointProblem - solver for saddle point problems using the inexact Uzawa scheme

@var __author__: name of author
@var __copyright__: copyrights
@var __license__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__copyright__=""" Copyright (c) 2006 by ACcESS MNRF
http://www.access.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.iservo.edu.au/esys"
__version__="$Revision$"
__date__="$Date$"
|

import escript
import linearPDEs
import numarray
import util
import math

##### Added by Artak
# from Numeric import zeros,Int,Float64
###################################

class TimeIntegrationManager:
"""
a simple mechanism to manage time dependent values.

typical usage is::

   dt=0.1 # time increment
   tm=TimeIntegrationManager(inital_value,p=1)
   while t<1.:
       v_guess=tm.extrapolate(dt) # extrapolate to t+dt
       v=...
       tm.checkin(dt,v)
       t+=dt

@note: currently only p=1 is supported.
"""
def __init__(self,*inital_values,**kwargs): |
"""
sets up the value manager, where C{inital_values} are the initial values and the keyword argument C{p} sets the order used for extrapolation
"""
if kwargs.has_key("p"): |
77 |
self.__p=kwargs["p"] |
78 |
else: |
79 |
self.__p=1 |
80 |
if kwargs.has_key("time"): |
81 |
self.__t=kwargs["time"] |
82 |
else: |
83 |
self.__t=0. |
84 |
self.__v_mem=[inital_values] |
85 |
self.__order=0 |
86 |
self.__dt_mem=[] |
87 |
self.__num_val=len(inital_values) |
88 |
|
89 |
def getTime(self): |
90 |
return self.__t |
91 |
def getValue(self): |
92 |
out=self.__v_mem[0] |
93 |
if len(out)==1: |
94 |
return out[0] |
95 |
else: |
96 |
return out |
97 |
|
98 |
def checkin(self,dt,*values): |
"""
adds new values to the manager. the value from p+1 steps back is dropped.
"""
o=min(self.__order+1,self.__p) |
103 |
self.__order=min(self.__order+1,self.__p) |
104 |
v_mem_new=[values] |
105 |
dt_mem_new=[dt] |
106 |
for i in range(o-1): |
107 |
v_mem_new.append(self.__v_mem[i]) |
108 |
dt_mem_new.append(self.__dt_mem[i]) |
109 |
v_mem_new.append(self.__v_mem[o-1]) |
110 |
self.__order=o |
111 |
self.__v_mem=v_mem_new |
112 |
self.__dt_mem=dt_mem_new |
113 |
self.__t+=dt |
114 |
|
115 |
def extrapolate(self,dt): |
116 |
""" |
117 |
extrapolates to dt forward in time. |
118 |
""" |
119 |
if self.__order==0: |
120 |
out=self.__v_mem[0] |
121 |
else: |
122 |
out=[] |
123 |
for i in range(self.__num_val): |
124 |
out.append((1.+dt/self.__dt_mem[0])*self.__v_mem[0][i]-dt/self.__dt_mem[0]*self.__v_mem[1][i]) |
125 |
|
126 |
if len(out)==0: |
127 |
return None |
128 |
elif len(out)==1: |
129 |
return out[0] |
130 |
else: |
131 |
return out |
132 |
|
133 |
|
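# A minimal usage sketch for TimeIntegrationManager, assuming plain scalar
# values; any object supporting scaling and addition (e.g. escript.Data)
# can be managed in the same way.
def _example_time_integration():
    dt=0.1
    tm=TimeIntegrationManager(0.,p=1)
    while tm.getTime()<1.:
        v_guess=tm.extrapolate(dt)   # first order guess for the value at t+dt
        v=tm.getTime()+dt            # the "computed" new value, here simply v(t)=t
        tm.checkin(dt,v)
    return tm.getValue()
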
class Projector:
"""
The Projector is a factory which projects a discontinuous function onto a
continuous function on a given domain.
"""
def __init__(self, domain, reduce = True, fast=True): |
140 |
""" |
141 |
Create a continuous function space projector for a domain. |
142 |
|
143 |
@param domain: Domain of the projection. |
144 |
@param reduce: Flag to reduce projection order (default is True) |
@param fast: Flag to use a fast method based on matrix lumping (default is True)
""" |
147 |
self.__pde = linearPDEs.LinearPDE(domain) |
148 |
if fast: |
149 |
self.__pde.setSolverMethod(linearPDEs.LinearPDE.LUMPING) |
150 |
self.__pde.setSymmetryOn() |
151 |
self.__pde.setReducedOrderTo(reduce) |
152 |
self.__pde.setValue(D = 1.) |
153 |
return |
154 |
|
155 |
def __call__(self, input_data): |
156 |
""" |
157 |
Projects input_data onto a continuous function |
158 |
|
159 |
@param input_data: The input_data to be projected. |
160 |
""" |
161 |
out=escript.Data(0.,input_data.getShape(),self.__pde.getFunctionSpaceForSolution()) |
162 |
self.__pde.setValue(Y = escript.Data(), Y_reduced = escript.Data()) |
163 |
if input_data.getRank()==0: |
164 |
self.__pde.setValue(Y = input_data) |
165 |
out=self.__pde.getSolution() |
166 |
elif input_data.getRank()==1: |
167 |
for i0 in range(input_data.getShape()[0]): |
168 |
self.__pde.setValue(Y = input_data[i0]) |
169 |
out[i0]=self.__pde.getSolution() |
170 |
elif input_data.getRank()==2: |
171 |
for i0 in range(input_data.getShape()[0]): |
172 |
for i1 in range(input_data.getShape()[1]): |
173 |
self.__pde.setValue(Y = input_data[i0,i1]) |
174 |
out[i0,i1]=self.__pde.getSolution() |
175 |
elif input_data.getRank()==3: |
176 |
for i0 in range(input_data.getShape()[0]): |
177 |
for i1 in range(input_data.getShape()[1]): |
178 |
for i2 in range(input_data.getShape()[2]): |
179 |
self.__pde.setValue(Y = input_data[i0,i1,i2]) |
180 |
out[i0,i1,i2]=self.__pde.getSolution() |
181 |
else: |
182 |
for i0 in range(input_data.getShape()[0]): |
183 |
for i1 in range(input_data.getShape()[1]): |
184 |
for i2 in range(input_data.getShape()[2]): |
185 |
for i3 in range(input_data.getShape()[3]): |
186 |
self.__pde.setValue(Y = input_data[i0,i1,i2,i3]) |
187 |
out[i0,i1,i2,i3]=self.__pde.getSolution() |
188 |
return out |
189 |
|
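# A hedged usage sketch for Projector: smoothing the (discontinuous) gradient of
# a field back onto a continuous function space.  The domain factory
# esys.finley.Rectangle is an assumption and may differ in your installation.
def _example_projector():
    from esys.finley import Rectangle      # assumed domain generator
    dom=Rectangle(n0=10,n1=10)
    x=escript.ContinuousFunction(dom).getX()
    u=x[0]*x[1]                            # a smooth scalar field
    du=util.grad(u)                        # discontinuous, element-based gradient
    proj=Projector(dom,reduce=True,fast=True)
    du_smooth=proj(du)                     # rank 1 Data on the solution function space
    return du_smooth
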
class NoPDE:
"""
solves the following problem for u:

M{kronecker[i,j]*D[j]*u[j]=Y[i]}

with constraint

M{u[j]=r[j]} where M{q[j]>0}

where D, Y, r and q are given functions of rank 1.

In the case of scalars this takes the form

M{D*u=Y}

with constraint

M{u=r} where M{q>0}

where D, Y, r and q are given scalar functions.

The constraint overwrites any other condition.

@note: This class is similar to the L{linearPDEs.LinearPDE} class with A=B=C=X=0 but has the intention
that all input parameters are given in L{Solution} or L{ReducedSolution}. The whole
thing is a bit strange and I blame Robert.Woodcock@csiro.au for this.
"""
def __init__(self,domain,D=None,Y=None,q=None,r=None): |
219 |
""" |
220 |
initialize the problem |
221 |
|
222 |
@param domain: domain of the PDE. |
223 |
@type domain: L{Domain} |
224 |
@param D: coefficient of the solution. |
225 |
@type D: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
226 |
@param Y: right hand side |
227 |
@type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
228 |
@param q: location of constraints |
229 |
@type q: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
230 |
@param r: value of solution at locations of constraints |
231 |
@type r: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
232 |
""" |
233 |
self.__domain=domain |
234 |
self.__D=D |
235 |
self.__Y=Y |
236 |
self.__q=q |
237 |
self.__r=r |
238 |
self.__u=None |
239 |
self.__function_space=escript.Solution(self.__domain) |
240 |
def setReducedOn(self): |
241 |
""" |
242 |
sets the L{FunctionSpace} of the solution to L{ReducedSolution} |
243 |
""" |
244 |
self.__function_space=escript.ReducedSolution(self.__domain) |
245 |
self.__u=None |
246 |
|
247 |
def setReducedOff(self): |
248 |
""" |
249 |
sets the L{FunctionSpace} of the solution to L{Solution} |
250 |
""" |
251 |
self.__function_space=escript.Solution(self.__domain) |
252 |
self.__u=None |
253 |
|
254 |
def setValue(self,D=None,Y=None,q=None,r=None): |
255 |
""" |
256 |
assigns values to the parameters. |
257 |
|
258 |
@param D: coefficient of the solution. |
259 |
@type D: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
260 |
@param Y: right hand side |
261 |
@type Y: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
262 |
@param q: location of constraints |
263 |
@type q: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
264 |
@param r: value of solution at locations of constraints |
265 |
@type r: C{float}, C{int}, L{numarray.NumArray}, L{Data} |
266 |
""" |
267 |
if not D==None: |
268 |
self.__D=D |
269 |
self.__u=None |
270 |
if not Y==None: |
271 |
self.__Y=Y |
272 |
self.__u=None |
273 |
if not q==None: |
274 |
self.__q=q |
275 |
self.__u=None |
276 |
if not r==None: |
277 |
self.__r=r |
278 |
self.__u=None |
279 |
|
280 |
def getSolution(self): |
281 |
""" |
282 |
returns the solution |
283 |
|
284 |
@return: the solution of the problem |
285 |
@rtype: L{Data} object in the L{FunctionSpace} L{Solution} or L{ReducedSolution}. |
286 |
""" |
287 |
if self.__u==None: |
288 |
if self.__D==None: |
289 |
raise ValueError,"coefficient D is undefined" |
290 |
D=escript.Data(self.__D,self.__function_space) |
291 |
if D.getRank()>1: |
292 |
raise ValueError,"coefficient D must have rank 0 or 1" |
293 |
if self.__Y==None: |
294 |
self.__u=escript.Data(0.,D.getShape(),self.__function_space) |
295 |
else: |
296 |
self.__u=util.quotient(self.__Y,D) |
297 |
if not self.__q==None: |
298 |
q=util.wherePositive(escript.Data(self.__q,self.__function_space)) |
299 |
self.__u*=(1.-q) |
300 |
if not self.__r==None: self.__u+=q*self.__r |
301 |
return self.__u |
302 |
|
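# A hedged sketch of NoPDE: solve D*u=Y pointwise with the constraint u=r where q>0.
# All coefficients live directly in the Solution (or ReducedSolution) function space.
def _example_nopde(domain):
    x=escript.Solution(domain).getX()
    problem=NoPDE(domain,D=2.,Y=1.)                  # solves 2*u=1, i.e. u=0.5
    problem.setValue(q=util.whereZero(x[0]),r=0.)    # pin u=0 where x_0=0
    return problem.getSolution()
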
class Locator:
"""
Locator provides access to the values of data objects at a given
spatial coordinate x.

In fact, a Locator object finds the sample in the set of samples of a
given function space or domain which is closest to the given
point x.
"""
|
313 |
def __init__(self,where,x=numarray.zeros((3,))): |
"""
Initializes a Locator to access values in Data objects on the Domain
or FunctionSpace C{where} for the sample point which is
closest to the given point x.

@param where: function space (a domain is interpreted as its continuous function space)
@type where: L{escript.FunctionSpace}
@param x: location(s) of interest
@type x: L{numarray.NumArray} or C{list} of L{numarray.NumArray}
"""
if isinstance(where,escript.FunctionSpace): |
325 |
self.__function_space=where |
326 |
else: |
327 |
self.__function_space=escript.ContinuousFunction(where) |
328 |
if isinstance(x, list): |
329 |
self.__id=[] |
330 |
for p in x: |
331 |
self.__id.append(util.length(self.__function_space.getX()-p[:self.__function_space.getDim()]).minGlobalDataPoint()) |
332 |
else: |
333 |
self.__id=util.length(self.__function_space.getX()-x[:self.__function_space.getDim()]).minGlobalDataPoint() |
334 |
|
335 |
def __str__(self): |
336 |
""" |
337 |
Returns the coordinates of the Locator as a string. |
338 |
""" |
339 |
x=self.getX() |
if isinstance(x,list):
out="[" |
342 |
first=True |
343 |
for xx in x: |
344 |
if not first: |
345 |
out+="," |
346 |
else: |
347 |
first=False |
348 |
out+=str(xx) |
349 |
out+="]>" |
350 |
else: |
351 |
out=str(x) |
352 |
return out |
353 |
|
354 |
def getX(self): |
355 |
""" |
356 |
Returns the exact coordinates of the Locator. |
357 |
""" |
358 |
return self(self.getFunctionSpace().getX()) |
359 |
|
360 |
def getFunctionSpace(self): |
361 |
""" |
362 |
Returns the function space of the Locator. |
363 |
""" |
364 |
return self.__function_space |
365 |
|
366 |
def getId(self,item=None): |
367 |
""" |
368 |
Returns the identifier of the location. |
369 |
""" |
370 |
if item == None: |
371 |
return self.__id |
372 |
else: |
373 |
if isinstance(self.__id,list): |
374 |
return self.__id[item] |
375 |
else: |
376 |
return self.__id |
377 |
|
378 |
|
379 |
def __call__(self,data): |
380 |
""" |
381 |
Returns the value of data at the Locator of a Data object otherwise |
382 |
the object is returned. |
383 |
""" |
384 |
return self.getValue(data) |
385 |
|
386 |
def getValue(self,data): |
387 |
""" |
388 |
Returns the value of data at the Locator if data is a Data object |
389 |
otherwise the object is returned. |
390 |
""" |
391 |
if isinstance(data,escript.Data): |
392 |
if data.getFunctionSpace()==self.getFunctionSpace(): |
393 |
dat=data |
394 |
else: |
395 |
dat=data.interpolate(self.getFunctionSpace()) |
396 |
id=self.getId() |
397 |
r=data.getRank() |
398 |
if isinstance(id,list): |
399 |
out=[] |
400 |
for i in id: |
o=dat.getValueOfGlobalDataPoint(*i)
if data.getRank()==0: |
403 |
out.append(o[0]) |
404 |
else: |
405 |
out.append(o) |
406 |
return out |
407 |
else: |
out=dat.getValueOfGlobalDataPoint(*id)
if data.getRank()==0: |
410 |
return out[0] |
411 |
else: |
412 |
return out |
413 |
else: |
414 |
return data |
415 |
|
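# A hedged usage sketch for Locator: probe a Data object at (or near) a point.
def _example_locator(domain):
    x=escript.ContinuousFunction(domain).getX()
    loc=Locator(domain,numarray.array([0.5,0.5]))   # closest sample to (0.5,0.5)
    print "probing at ",loc.getX()
    return loc(x[0]*x[1])                           # value of x0*x1 at that sample
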
416 |
class SolverSchemeException(Exception): |
417 |
""" |
418 |
exceptions thrown by solvers |
419 |
""" |
420 |
pass |
421 |
|
422 |
class IndefinitePreconditioner(SolverSchemeException): |
423 |
""" |
424 |
the preconditioner is not positive definite. |
425 |
""" |
426 |
pass |
427 |
class MaxIterReached(SolverSchemeException): |
428 |
""" |
maximum number of iteration steps is reached.
""" |
431 |
pass |
432 |
class IterationBreakDown(SolverSchemeException): |
433 |
""" |
iteration scheme encounters an incurable breakdown.
""" |
436 |
pass |
437 |
class NegativeNorm(SolverSchemeException): |
438 |
""" |
439 |
a norm calculation returns a negative norm. |
440 |
""" |
441 |
pass |
442 |
|
class IterationHistory(object):
"""
The IterationHistory class is used to define a stopping criterion. It keeps track of the
residual norms. The stopping criterion indicates termination if the residual norm has been reduced by
a given tolerance.
"""
def __init__(self,tolerance=math.sqrt(util.EPSILON),verbose=False): |
450 |
""" |
451 |
Initialization |
452 |
|
453 |
@param tolerance: tolerance |
454 |
@type tolerance: positive C{float} |
455 |
@param verbose: switches on the printing out some information |
456 |
@type verbose: C{bool} |
457 |
""" |
458 |
if not tolerance>0.: |
459 |
raise ValueError,"tolerance needs to be positive." |
460 |
self.tolerance=tolerance |
461 |
self.verbose=verbose |
462 |
self.history=[] |
def stoppingcriterium(self,norm_r,r,x):
"""
returns True if C{norm_r} has been reduced to C{tolerance}*C{norm_r[0]}, where C{norm_r[0]} is the residual norm at the first call.

@param norm_r: current residual norm
@type norm_r: non-negative C{float}
@param r: current residual (not used)
@param x: current solution approximation (not used)
@return: C{True} if the stopping criterion is fulfilled. Otherwise C{False} is returned.
@rtype: C{bool}
"""
self.history.append(norm_r)
if self.verbose: print "iter: %s: inner(rhat,r) = %e"%(len(self.history)-1, self.history[-1])
return self.history[-1]<=self.tolerance * self.history[0]
|
def stoppingcriterium2(self,norm_r,norm_b,solver="GMRES",TOL=None):
"""
returns True if C{norm_r} has been reduced to C{TOL}*C{norm_b}, where C{TOL} defaults to the tolerance of the history.

@param norm_r: current residual norm
@type norm_r: non-negative C{float}
@param norm_b: norm of right hand side
@type norm_b: non-negative C{float}
@return: C{True} if the stopping criterion is fulfilled. Otherwise C{False} is returned.
@rtype: C{bool}
"""
if TOL==None:
TOL=self.tolerance
self.history.append(norm_r)
if self.verbose: print "iter: %s: norm(r) = %e"%(len(self.history)-1, self.history[-1])
return self.history[-1]<=TOL * norm_b
|
def PCG(b, Aprod, Msolve, bilinearform, stoppingcriterium, x=None, iter_max=100):
"""
Solver for

M{Ax=b}

with a symmetric and positive definite operator A (more details required!).
It uses the conjugate gradient method with preconditioner M providing an approximation of A.

The iteration is terminated if the C{stoppingcriterium} function returns C{True}.

For details on the preconditioned conjugate gradient method see the book:

Templates for the Solution of Linear Systems by R. Barrett, M. Berry,
T.F. Chan, J. Demmel, J. Donato, J. Dongarra, V. Eijkhout, R. Pozo,
C. Romine, and H. van der Vorst.

@param b: the right hand side of the linear system. C{b} is altered.
@type b: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
@param Aprod: returns the value Ax
@type Aprod: function C{Aprod(x)} where C{x} is of the same type as argument C{x}. The returned object needs to be of the same type as argument C{b}.
@param Msolve: solves Mx=r
@type Msolve: function C{Msolve(r)} where C{r} is of the same type as argument C{b}. The returned object needs to be of the same
type as argument C{x}.
@param bilinearform: inner product C{<x,r>}
@type bilinearform: function C{bilinearform(x,r)} where C{x} is of the same type as argument C{x} and C{r} is of the same type as argument C{b}. The returned value is a C{float}.
@param stoppingcriterium: function which returns True if a stopping criterion is met. C{stoppingcriterium} has the arguments C{norm_r}, C{r} and C{x} giving the current norm of the residual (=C{sqrt(bilinearform(Msolve(r),r))}), the current residual and the current solution approximation. C{stoppingcriterium} is called in each iteration step.
@type stoppingcriterium: function that returns C{True} or C{False}
@param x: an initial guess for the solution. If no C{x} is given 0*b is used.
@type x: any object supporting inplace add (x+=y) and scaling (x=scalar*y)
@param iter_max: maximum number of iteration steps.
@type iter_max: C{int}
@return: the solution approximation and the corresponding residual
@rtype: C{tuple}
@warning: C{b} and C{x} are altered.
"""
iter=0 |
536 |
if x==None: |
537 |
x=0*b |
538 |
else: |
539 |
b += (-1)*Aprod(x) |
540 |
r=b |
541 |
rhat=Msolve(r) |
542 |
d = rhat |
543 |
rhat_dot_r = bilinearform(rhat, r) |
544 |
if rhat_dot_r<0: raise NegativeNorm,"negative norm." |
545 |
|
546 |
while not stoppingcriterium(math.sqrt(rhat_dot_r),r,x): |
547 |
iter+=1 |
548 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max |
549 |
|
550 |
q=Aprod(d) |
551 |
alpha = rhat_dot_r / bilinearform(d, q) |
552 |
x += alpha * d |
553 |
r += (-alpha) * q |
554 |
|
555 |
rhat=Msolve(r) |
556 |
rhat_dot_r_new = bilinearform(rhat, r) |
557 |
beta = rhat_dot_r_new / rhat_dot_r |
558 |
rhat+=beta * d |
559 |
d=rhat |
560 |
|
561 |
rhat_dot_r = rhat_dot_r_new |
562 |
if rhat_dot_r<0: raise NegativeNorm,"negative norm." |
563 |
|
564 |
return x,r |
565 |
|
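# A hedged sketch of driving PCG directly with plain numarray vectors and an
# IterationHistory stopping criterion; the 2x2 system below is symmetric
# positive definite and Msolve is a simple Jacobi (diagonal) preconditioner.
def _example_pcg():
    A=numarray.array([[4.,1.],[1.,3.]])
    b=numarray.array([1.,2.])
    def Aprod(x): return numarray.dot(A,x)
    def Msolve(r): return r/numarray.array([4.,3.])
    def inner(x,r): return numarray.dot(x,r)
    history=IterationHistory(tolerance=1.e-8)
    x,r=PCG(b*1.,Aprod,Msolve,inner,history.stoppingcriterium)
    return x      # approximately [1./11., 7./11.]
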
566 |
|
567 |
############################ |
568 |
# Added by Artak |
569 |
#################################3 |
570 |
|
571 |
#Apply a sequence of k Givens rotations, used within gmres codes |
572 |
# vrot=givapp(c, s, vin, k) |
573 |
def givapp(c,s,vin): |
574 |
vrot=vin # warning: vin is altered!!!! |
575 |
if isinstance(c,float): |
576 |
vrot=[c*vrot[0]-s*vrot[1],s*vrot[0]+c*vrot[1]] |
577 |
else: |
578 |
for i in range(len(c)): |
579 |
w1=c[i]*vrot[i]-s[i]*vrot[i+1] |
580 |
w2=s[i]*vrot[i]+c[i]*vrot[i+1] |
581 |
vrot[i:i+2]=w1,w2 |
582 |
return vrot |
583 |
|
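# A small sanity sketch for givapp: a single rotation with c=3/5, s=-4/5 maps
# the vector [3,4] onto [5,0], i.e. it zeroes the second component.
def _example_givapp():
    return givapp(0.6,-0.8,[3.,4.])     # -> [5.0, 0.0]
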
584 |
def GMRES(b, Aprod, Msolve, bilinearform, stoppingcriterium, x=None, iter_max=100, iter_restart=20): |
585 |
m=iter_restart |
586 |
iter=0 |
587 |
while True: |
588 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached"%iter_max |
589 |
x,stopped=GMRESm(b, Aprod, Msolve, bilinearform, stoppingcriterium, x=x, iter_max=iter_max-iter, iter_restart=m) |
590 |
if stopped: break |
591 |
iter+=iter_restart |
592 |
return x |
593 |
|
594 |
def GMRESm(b, Aprod, Msolve, bilinearform, stoppingcriterium, x=None, iter_max=100, iter_restart=20): |
595 |
iter=0 |
596 |
r=Msolve(b) |
597 |
r_dot_r = bilinearform(r, r) |
598 |
if r_dot_r<0: raise NegativeNorm,"negative norm." |
599 |
norm_b=math.sqrt(r_dot_r) |
600 |
|
601 |
if x==None: |
602 |
x=0 |
603 |
else: |
604 |
r=Msolve(b-Aprod(x)) |
605 |
r_dot_r = bilinearform(r, r) |
606 |
if r_dot_r<0: raise NegativeNorm,"negative norm." |
607 |
|
608 |
h=numarray.zeros((iter_restart,iter_restart),numarray.Float64) |
609 |
c=numarray.zeros(iter_restart,numarray.Float64) |
610 |
s=numarray.zeros(iter_restart,numarray.Float64) |
611 |
g=numarray.zeros(iter_restart,numarray.Float64) |
612 |
v=[] |
613 |
|
614 |
rho=math.sqrt(r_dot_r) |
615 |
|
616 |
v.append(r/rho) |
617 |
g[0]=rho |
618 |
while not (stoppingcriterium(rho,norm_b) or iter==iter_restart-1): |
619 |
|
620 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max |
621 |
|
622 |
|
623 |
p=Msolve(Aprod(v[iter])) |
624 |
|
625 |
v.append(p) |
626 |
|
627 |
v_norm1=math.sqrt(bilinearform(v[iter+1], v[iter+1])) |
628 |
|
629 |
# Modified Gram-Schmidt |
630 |
for j in range(iter+1): |
631 |
h[j][iter]=bilinearform(v[j],v[iter+1]) |
632 |
v[iter+1]+=(-1.)*h[j][iter]*v[j] |
633 |
|
634 |
h[iter+1][iter]=math.sqrt(bilinearform(v[iter+1],v[iter+1])) |
635 |
v_norm2=h[iter+1][iter] |
636 |
|
637 |
|
638 |
# Reorthogonalize if needed |
639 |
if v_norm1 + 0.001*v_norm2 == v_norm1: #Brown/Hindmarsh condition (default) |
640 |
for j in range(iter+1): |
641 |
hr=bilinearform(v[j],v[iter+1]) |
642 |
h[j][iter]=h[j][iter]+hr #vhat |
643 |
v[iter+1] +=(-1.)*hr*v[j] |
644 |
|
645 |
v_norm2=math.sqrt(bilinearform(v[iter+1], v[iter+1])) |
646 |
h[iter+1][iter]=v_norm2 |
647 |
|
648 |
# watch out for happy breakdown |
649 |
if v_norm2 != 0: |
650 |
v[iter+1]=v[iter+1]/h[iter+1][iter] |
651 |
|
652 |
# Form and store the information for the new Givens rotation |
653 |
if iter > 0 : |
654 |
hhat=[] |
655 |
for i in range(iter+1) : hhat.append(h[i][iter]) |
656 |
hhat=givapp(c[0:iter],s[0:iter],hhat); |
657 |
for i in range(iter+1) : h[i][iter]=hhat[i] |
658 |
|
659 |
mu=math.sqrt(h[iter][iter]*h[iter][iter]+h[iter+1][iter]*h[iter+1][iter]) |
660 |
if mu!=0 : |
661 |
c[iter]=h[iter][iter]/mu |
662 |
s[iter]=-h[iter+1][iter]/mu |
663 |
h[iter][iter]=c[iter]*h[iter][iter]-s[iter]*h[iter+1][iter] |
664 |
h[iter+1][iter]=0.0 |
665 |
g[iter:iter+2]=givapp(c[iter],s[iter],g[iter:iter+2]) |
666 |
|
667 |
# Update the residual norm |
668 |
rho=abs(g[iter+1]) |
669 |
iter+=1 |
670 |
|
671 |
# At this point either iter > iter_max or rho < tol. |
672 |
# It's time to compute x and leave. |
673 |
|
674 |
if iter > 0 : |
675 |
y=numarray.zeros(iter,numarray.Float64) |
676 |
y[iter-1] = g[iter-1] / h[iter-1][iter-1] |
677 |
if iter > 1 : |
678 |
i=iter-2 |
679 |
while i>=0 : |
680 |
y[i] = ( g[i] - numarray.dot(h[i][i+1:iter], y[i+1:iter])) / h[i][i] |
681 |
i=i-1 |
682 |
xhat=v[iter-1]*y[iter-1] |
683 |
for i in range(iter-1): |
684 |
xhat += v[i]*y[i] |
685 |
else : xhat=v[0] |
686 |
|
687 |
x += xhat |
688 |
if iter<iter_restart-1: |
689 |
stopped=True |
690 |
else: |
691 |
stopped=False |
692 |
|
693 |
return x,stopped |
694 |
|
695 |
###################################################### |
696 |
def dirder(x, w, bilinearform, Aprod, Msolve, f0, b ): |
697 |
###################################################### |
698 |
|
699 |
# DIRDER estimates the directional derivative of a function F. |
700 |
|
701 |
|
702 |
# Hardwired difference increment. |
703 |
# |
704 |
epsnew = 1.0e-07 |
705 |
# |
706 |
# Scale the step. |
707 |
# |
708 |
norm_w=math.sqrt(bilinearform(w,w)) |
709 |
if norm_w== 0.0: |
710 |
return x/x |
711 |
|
712 |
epsnew = epsnew / norm_w |
713 |
|
714 |
if norm_w > 0.0: |
715 |
epsnew = epsnew * math.sqrt(bilinearform(x,x)) |
716 |
# |
717 |
# DEL and F1 could share the same space if storage |
718 |
# is more important than clarity. |
719 |
# |
720 |
|
721 |
DEL = x + epsnew * w |
722 |
f1 = -Msolve(Aprod(DEL)) |
723 |
z = ( f1 - f0 ) / epsnew |
724 |
return z |
725 |
|
726 |
###################################################### |
727 |
def FDGMRES(f0, Aprod, Msolve, bilinearform, stoppingcriterium, xc=None, x=None, iter_max=100, iter_restart=20,TOL=None): |
728 |
###################################################### |
729 |
b=-f0 |
730 |
b_dot_b = bilinearform(b, b) |
731 |
if b_dot_b<0: raise NegativeNorm,"negative norm." |
732 |
norm_b=math.sqrt(b_dot_b) |
733 |
|
734 |
r=b |
735 |
|
736 |
if x==None: |
737 |
x=0 |
738 |
else: |
739 |
r=-dirder(xc,x,bilinearform,Aprod,Msolve,f0,b)-f0 |
740 |
|
741 |
r_dot_r = bilinearform(r, r) |
742 |
if r_dot_r<0: raise NegativeNorm,"negative norm." |
743 |
|
744 |
h=numarray.zeros((iter_restart,iter_restart),numarray.Float64) |
745 |
c=numarray.zeros(iter_restart,numarray.Float64) |
746 |
s=numarray.zeros(iter_restart,numarray.Float64) |
747 |
g=numarray.zeros(iter_restart,numarray.Float64) |
748 |
v=[] |
749 |
|
750 |
rho=math.sqrt(r_dot_r) |
751 |
|
752 |
v.append(r/rho) |
753 |
g[0]=rho |
754 |
iter=0 |
755 |
|
756 |
while not (stoppingcriterium(rho,norm_b,solver="FDGMRES",TOL=TOL) or iter==iter_restart-1): |
757 |
|
758 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max |
759 |
|
760 |
|
761 |
p=dirder(xc, v[iter], bilinearform,Aprod,Msolve,f0,b) |
762 |
|
763 |
v.append(p) |
764 |
|
765 |
v_norm1=math.sqrt(bilinearform(v[iter+1], v[iter+1])) |
766 |
|
767 |
# Modified Gram-Schmidt |
768 |
for j in range(iter+1): |
769 |
h[j][iter]=bilinearform(v[j],v[iter+1]) |
770 |
v[iter+1]+=(-1.)*h[j][iter]*v[j] |
771 |
|
772 |
h[iter+1][iter]=math.sqrt(bilinearform(v[iter+1],v[iter+1])) |
773 |
v_norm2=h[iter+1][iter] |
774 |
|
775 |
|
776 |
# Reorthogonalize if needed |
777 |
if v_norm1 + 0.001*v_norm2 == v_norm1: #Brown/Hindmarsh condition (default) |
778 |
for j in range(iter+1): |
779 |
hr=bilinearform(v[j],v[iter+1]) |
780 |
h[j][iter]=h[j][iter]+hr #vhat |
781 |
v[iter+1] +=(-1.)*hr*v[j] |
782 |
|
783 |
v_norm2=math.sqrt(bilinearform(v[iter+1], v[iter+1])) |
784 |
h[iter+1][iter]=v_norm2 |
785 |
|
786 |
# watch out for happy breakdown |
787 |
if v_norm2 != 0: |
788 |
v[iter+1]=v[iter+1]/h[iter+1][iter] |
789 |
|
790 |
# Form and store the information for the new Givens rotation |
791 |
if iter > 0 : |
792 |
hhat=[] |
793 |
for i in range(iter+1) : hhat.append(h[i][iter]) |
794 |
hhat=givapp(c[0:iter],s[0:iter],hhat); |
795 |
for i in range(iter+1) : h[i][iter]=hhat[i] |
796 |
|
797 |
mu=math.sqrt(h[iter][iter]*h[iter][iter]+h[iter+1][iter]*h[iter+1][iter]) |
798 |
if mu!=0 : |
799 |
c[iter]=h[iter][iter]/mu |
800 |
s[iter]=-h[iter+1][iter]/mu |
801 |
h[iter][iter]=c[iter]*h[iter][iter]-s[iter]*h[iter+1][iter] |
802 |
h[iter+1][iter]=0.0 |
803 |
g[iter:iter+2]=givapp(c[iter],s[iter],g[iter:iter+2]) |
804 |
|
805 |
# Update the residual norm |
806 |
rho=abs(g[iter+1]) |
807 |
iter+=1 |
808 |
|
809 |
if iter > 0 : |
810 |
y=numarray.zeros(iter,numarray.Float64) |
811 |
y[iter-1] = g[iter-1] / h[iter-1][iter-1] |
812 |
if iter > 1 : |
813 |
i=iter-2 |
814 |
while i>=0 : |
815 |
y[i] = ( g[i] - numarray.dot(h[i][i+1:iter], y[i+1:iter])) / h[i][i] |
816 |
i=i-1 |
817 |
xhat=v[iter-1]*y[iter-1] |
818 |
for i in range(iter-1): |
819 |
xhat += v[i]*y[i] |
820 |
else : xhat=v[0] |
821 |
|
822 |
x += xhat |
823 |
if iter<iter_restart-1: |
824 |
stopped=True |
825 |
else: |
826 |
stopped=False |
827 |
|
828 |
return x,stopped |
829 |
|
830 |
def MINRES(b, Aprod, Msolve, bilinearform, stoppingcriterium, x=None, iter_max=100): |
831 |
|
832 |
# |
833 |
# minres solves the system of linear equations Ax = b |
834 |
# where A is a symmetric matrix (possibly indefinite or singular) |
835 |
# and b is a given vector. |
836 |
# |
837 |
# "A" may be a dense or sparse matrix (preferably sparse!) |
838 |
# or the name of a function such that |
839 |
# y = A(x) |
840 |
# returns the product y = Ax for any given vector x. |
841 |
# |
842 |
# "M" defines a positive-definite preconditioner M = C C'. |
843 |
# "M" may be a dense or sparse matrix (preferably sparse!) |
844 |
# or the name of a function such that |
845 |
# solves the system My = x for any given vector x. |
846 |
# |
847 |
# |
848 |
|
849 |
#------------------------------------------------------------------ |
850 |
# Set up y and v for the first Lanczos vector v1. |
851 |
# y = beta1 P' v1, where P = C**(-1). |
852 |
# v is really P' v1. |
853 |
#------------------------------------------------------------------ |
854 |
if x==None: |
855 |
x=0*b |
856 |
else: |
857 |
b += (-1)*Aprod(x) |
858 |
|
859 |
r1 = b |
860 |
y = Msolve(b) |
861 |
beta1 = bilinearform(b,y) |
862 |
|
863 |
if beta1< 0: raise NegativeNorm,"negative norm." |
864 |
|
865 |
# If b = 0 exactly, stop with x = 0. |
866 |
if beta1==0: return x*0. |
867 |
|
868 |
if beta1> 0: |
869 |
beta1 = math.sqrt(beta1) |
870 |
|
871 |
#------------------------------------------------------------------ |
872 |
# Initialize quantities. |
873 |
# ------------------------------------------------------------------ |
874 |
iter = 0 |
875 |
Anorm = 0 |
876 |
ynorm = 0 |
877 |
oldb = 0 |
878 |
beta = beta1 |
879 |
dbar = 0 |
880 |
epsln = 0 |
881 |
phibar = beta1 |
882 |
rhs1 = beta1 |
883 |
rhs2 = 0 |
884 |
rnorm = phibar |
885 |
tnorm2 = 0 |
886 |
ynorm2 = 0 |
887 |
cs = -1 |
888 |
sn = 0 |
889 |
w = b*0. |
890 |
w2 = b*0. |
891 |
r2 = r1 |
892 |
eps = 0.0001 |
893 |
|
894 |
#--------------------------------------------------------------------- |
895 |
# Main iteration loop. |
896 |
# -------------------------------------------------------------------- |
897 |
while not stoppingcriterium(rnorm,Anorm*ynorm,'MINRES'): # checks ||r|| < (||A|| ||x||) * TOL |
898 |
|
899 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max |
900 |
iter = iter + 1 |
901 |
|
902 |
#----------------------------------------------------------------- |
903 |
# Obtain quantities for the next Lanczos vector vk+1, k = 1, 2,... |
904 |
# The general iteration is similar to the case k = 1 with v0 = 0: |
905 |
# |
906 |
# p1 = Operator * v1 - beta1 * v0, |
907 |
# alpha1 = v1'p1, |
908 |
# q2 = p2 - alpha1 * v1, |
909 |
# beta2^2 = q2'q2, |
910 |
# v2 = (1/beta2) q2. |
911 |
# |
912 |
# Again, y = betak P vk, where P = C**(-1). |
913 |
#----------------------------------------------------------------- |
914 |
s = 1/beta # Normalize previous vector (in y). |
915 |
v = s*y # v = vk if P = I |
916 |
|
917 |
y = Aprod(v) |
918 |
|
919 |
if iter >= 2: |
920 |
y = y - (beta/oldb)*r1 |
921 |
|
922 |
alfa = bilinearform(v,y) # alphak |
923 |
y = (- alfa/beta)*r2 + y |
924 |
r1 = r2 |
925 |
r2 = y |
926 |
y = Msolve(r2) |
927 |
oldb = beta # oldb = betak |
928 |
beta = bilinearform(r2,y) # beta = betak+1^2 |
929 |
if beta < 0: raise NegativeNorm,"negative norm." |
930 |
|
931 |
beta = math.sqrt( beta ) |
932 |
tnorm2 = tnorm2 + alfa*alfa + oldb*oldb + beta*beta |
933 |
|
934 |
if iter==1: # Initialize a few things. |
935 |
gmax = abs( alfa ) # alpha1 |
936 |
gmin = gmax # alpha1 |
937 |
|
938 |
# Apply previous rotation Qk-1 to get |
939 |
# [deltak epslnk+1] = [cs sn][dbark 0 ] |
940 |
# [gbar k dbar k+1] [sn -cs][alfak betak+1]. |
941 |
|
942 |
oldeps = epsln |
943 |
delta = cs * dbar + sn * alfa # delta1 = 0 deltak |
944 |
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k |
945 |
epsln = sn * beta # epsln2 = 0 epslnk+1 |
946 |
dbar = - cs * beta # dbar 2 = beta2 dbar k+1 |
947 |
|
948 |
# Compute the next plane rotation Qk |
949 |
|
950 |
gamma = math.sqrt(gbar*gbar+beta*beta) # gammak |
951 |
gamma = max(gamma,eps) |
952 |
cs = gbar / gamma # ck |
953 |
sn = beta / gamma # sk |
954 |
phi = cs * phibar # phik |
955 |
phibar = sn * phibar # phibark+1 |
956 |
|
957 |
# Update x. |
958 |
|
959 |
denom = 1/gamma |
960 |
w1 = w2 |
961 |
w2 = w |
962 |
w = (v - oldeps*w1 - delta*w2) * denom |
963 |
x = x + phi*w |
964 |
|
965 |
# Go round again. |
966 |
|
967 |
gmax = max(gmax,gamma) |
968 |
gmin = min(gmin,gamma) |
969 |
z = rhs1 / gamma |
970 |
ynorm2 = z*z + ynorm2 |
971 |
rhs1 = rhs2 - delta*z |
972 |
rhs2 = - epsln*z |
973 |
|
974 |
# Estimate various norms and test for convergence. |
975 |
|
976 |
Anorm = math.sqrt( tnorm2 ) |
977 |
ynorm = math.sqrt( ynorm2 ) |
978 |
|
979 |
rnorm = phibar |
980 |
|
981 |
return x |
982 |
|
983 |
def NewtonGMRES(b, Aprod, Msolve, bilinearform, stoppingcriterium,x=None, iter_max=100,iter_restart=20): |
984 |
|
985 |
gamma=.9 |
986 |
lmaxit=40 |
987 |
etamax=.5 |
988 |
|
989 |
n = 1 #len(x) |
990 |
iter=0 |
991 |
|
992 |
# evaluate f at the initial iterate |
993 |
# compute the stop tolerance |
994 |
# |
995 |
r=b |
996 |
if x==None: |
997 |
x=0*b |
998 |
else: |
999 |
r += (-1)*Aprod(x) |
1000 |
|
1001 |
f0=-Msolve(r) |
1002 |
fnrm=math.sqrt(bilinearform(f0,f0))/math.sqrt(n) |
1003 |
fnrmo=1 |
1004 |
atol=1.e-2 |
1005 |
rtol=1.e-4 |
1006 |
stop_tol=atol + rtol*fnrm |
1007 |
# |
1008 |
# main iteration loop |
1009 |
# |
1010 |
while not stoppingcriterium(fnrm,stop_tol,'NewtonGMRES',TOL=1.): |
1011 |
|
1012 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max |
1013 |
# |
1014 |
# keep track of the ratio (rat = fnrm/frnmo) |
1015 |
# of successive residual norms and |
1016 |
# the iteration counter (iter) |
1017 |
# |
1018 |
#rat=fnrm/fnrmo |
1019 |
fnrmo=fnrm |
1020 |
iter+=1 |
1021 |
# |
1022 |
# compute the step using a GMRES(m) routine especially designed for this purpose |
1023 |
# |
1024 |
initer=0 |
1025 |
while True: |
1026 |
xc,stopped=FDGMRES(f0, Aprod, Msolve, bilinearform, stoppingcriterium, xc=x, iter_max=lmaxit-initer, iter_restart=iter_restart, TOL=etamax) |
1027 |
if stopped: break |
1028 |
initer+=iter_restart |
1029 |
xold=x |
1030 |
x+=xc |
1031 |
f0=-Msolve(Aprod(x)) |
1032 |
fnrm=math.sqrt(bilinearform(f0,f0))/math.sqrt(n) |
1033 |
rat=fnrm/fnrmo |
1034 |
|
1035 |
|
1036 |
# adjust eta |
1037 |
# |
1038 |
if etamax > 0: |
1039 |
etaold=etamax |
1040 |
etanew=gamma*rat*rat |
1041 |
if gamma*etaold*etaold > .1 : |
1042 |
etanew=max(etanew,gamma*etaold*etaold) |
1043 |
|
1044 |
etamax=min(etanew,etamax) |
1045 |
etamax=max(etamax,.5*stop_tol/fnrm) |
1046 |
|
1047 |
return x |
1048 |
|
1049 |
def TFQMR(b, Aprod, Msolve, bilinearform, stoppingcriterium, x=None, iter_max=100): |
1050 |
|
1051 |
# TFQMR solver for linear systems |
1052 |
# |
1053 |
# |
1054 |
# initialization |
1055 |
# |
1056 |
errtol = math.sqrt(bilinearform(b,b)) |
1057 |
norm_b=errtol |
1058 |
kmax = iter_max |
1059 |
error = [] |
1060 |
|
1061 |
if math.sqrt(bilinearform(x,x)) != 0.0: |
1062 |
r = b - Aprod(x) |
1063 |
else: |
1064 |
r = b |
1065 |
|
1066 |
r=Msolve(r) |
1067 |
|
1068 |
u1=0 |
1069 |
u2=0 |
1070 |
y1=0 |
1071 |
y2=0 |
1072 |
|
1073 |
w = r |
1074 |
y1 = r |
1075 |
iter = 0 |
1076 |
d = 0 |
1077 |
|
1078 |
v = Msolve(Aprod(y1)) |
1079 |
u1 = v |
1080 |
|
1081 |
theta = 0.0; |
1082 |
eta = 0.0; |
1083 |
tau = math.sqrt(bilinearform(r,r)) |
1084 |
error = [ error, tau ] |
1085 |
rho = tau * tau |
1086 |
m=1 |
1087 |
# |
1088 |
# TFQMR iteration |
1089 |
# |
1090 |
# while ( iter < kmax-1 ): |
1091 |
|
1092 |
while not stoppingcriterium(tau*math.sqrt ( m + 1 ),norm_b,'TFQMR'): |
1093 |
if iter >= iter_max: raise MaxIterReached,"maximum number of %s steps reached."%iter_max |
1094 |
|
1095 |
sigma = bilinearform(r,v) |
1096 |
|
1097 |
if ( sigma == 0.0 ): |
1098 |
raise 'TFQMR breakdown, sigma=0' |
1099 |
|
1100 |
|
1101 |
alpha = rho / sigma |
1102 |
|
1103 |
for j in range(2): |
1104 |
# |
1105 |
# Compute y2 and u2 only if you have to |
1106 |
# |
1107 |
if ( j == 1 ): |
1108 |
y2 = y1 - alpha * v |
1109 |
u2 = Msolve(Aprod(y2)) |
1110 |
|
1111 |
m = 2 * (iter+1) - 2 + (j+1) |
1112 |
if j==0: |
1113 |
w = w - alpha * u1 |
1114 |
d = y1 + ( theta * theta * eta / alpha ) * d |
1115 |
if j==1: |
1116 |
w = w - alpha * u2 |
1117 |
d = y2 + ( theta * theta * eta / alpha ) * d |
1118 |
|
1119 |
theta = math.sqrt(bilinearform(w,w))/ tau |
1120 |
c = 1.0 / math.sqrt ( 1.0 + theta * theta ) |
1121 |
tau = tau * theta * c |
1122 |
eta = c * c * alpha |
1123 |
x = x + eta * d |
1124 |
# |
1125 |
# Try to terminate the iteration at each pass through the loop |
1126 |
# |
1127 |
# if ( tau * math.sqrt ( m + 1 ) <= errtol ): |
1128 |
# error = [ error, tau ] |
1129 |
# total_iters = iter |
1130 |
# break |
1131 |
|
1132 |
|
1133 |
if ( rho == 0.0 ): |
1134 |
raise 'TFQMR breakdown, rho=0' |
1135 |
|
1136 |
|
1137 |
rhon = bilinearform(r,w) |
1138 |
beta = rhon / rho; |
1139 |
rho = rhon; |
1140 |
y1 = w + beta * y2; |
1141 |
u1 = Msolve(Aprod(y1)) |
1142 |
v = u1 + beta * ( u2 + beta * v ) |
1143 |
error = [ error, tau ] |
1144 |
total_iters = iter |
1145 |
|
1146 |
iter = iter + 1 |
1147 |
|
1148 |
return x |
1149 |
|
1150 |
|
1151 |
############################################# |
1152 |
|
1153 |
class ArithmeticTuple(object): |
1154 |
""" |
tuple supporting in-place update x+=y and scaling x=a*y where x,y are ArithmeticTuples and a is a float.
|
1157 |
example of usage: |
1158 |
|
1159 |
from esys.escript import Data |
1160 |
from numarray import array |
1161 |
a=Data(...) |
1162 |
b=array([1.,4.]) |
1163 |
x=ArithmeticTuple(a,b) |
1164 |
y=5.*x |
1165 |
|
1166 |
""" |
1167 |
def __init__(self,*args): |
1168 |
""" |
1169 |
initialize object with elements args. |
1170 |
|
@param args: tuple of objects that support in-place add (x+=y) and scaling (x=a*y)
""" |
1173 |
self.__items=list(args) |
1174 |
|
1175 |
def __len__(self): |
1176 |
""" |
1177 |
number of items |
1178 |
|
1179 |
@return: number of items |
1180 |
@rtype: C{int} |
1181 |
""" |
1182 |
return len(self.__items) |
1183 |
|
1184 |
def __getitem__(self,index): |
1185 |
""" |
1186 |
get an item |
1187 |
|
1188 |
@param index: item to be returned |
1189 |
@type index: C{int} |
1190 |
@return: item with index C{index} |
1191 |
""" |
1192 |
return self.__items.__getitem__(index) |
1193 |
|
1194 |
def __mul__(self,other): |
1195 |
""" |
1196 |
scaling from the right |
1197 |
|
1198 |
@param other: scaling factor |
1199 |
@type other: C{float} |
1200 |
@return: itemwise self*other |
1201 |
@rtype: L{ArithmeticTuple} |
1202 |
""" |
1203 |
out=[] |
1204 |
for i in range(len(self)): |
1205 |
out.append(self[i]*other) |
1206 |
return ArithmeticTuple(*tuple(out)) |
1207 |
|
1208 |
def __rmul__(self,other): |
1209 |
""" |
1210 |
scaling from the left |
1211 |
|
1212 |
@param other: scaling factor |
1213 |
@type other: C{float} |
1214 |
@return: itemwise other*self |
1215 |
@rtype: L{ArithmeticTuple} |
1216 |
""" |
1217 |
out=[] |
1218 |
for i in range(len(self)): |
1219 |
out.append(other*self[i]) |
1220 |
return ArithmeticTuple(*tuple(out)) |
1221 |
|
1222 |
######################### |
1223 |
# Added by Artak |
1224 |
######################### |
1225 |
def __div__(self,other): |
1226 |
""" |
1227 |
dividing from the right |
1228 |
|
1229 |
@param other: scaling factor |
1230 |
@type other: C{float} |
1231 |
@return: itemwise self/other |
1232 |
@rtype: L{ArithmeticTuple} |
1233 |
""" |
1234 |
out=[] |
1235 |
for i in range(len(self)): |
1236 |
out.append(self[i]/other) |
1237 |
return ArithmeticTuple(*tuple(out)) |
1238 |
|
1239 |
def __rdiv__(self,other): |
1240 |
""" |
1241 |
dividing from the left |
1242 |
|
1243 |
@param other: scaling factor |
1244 |
@type other: C{float} |
1245 |
@return: itemwise other/self |
1246 |
@rtype: L{ArithmeticTuple} |
1247 |
""" |
1248 |
out=[] |
1249 |
for i in range(len(self)): |
1250 |
out.append(other/self[i]) |
1251 |
return ArithmeticTuple(*tuple(out)) |
1252 |
|
1253 |
##########################################33 |
1254 |
|
1255 |
def __iadd__(self,other): |
1256 |
""" |
1257 |
in-place add of other to self |
1258 |
|
1259 |
@param other: increment |
1260 |
@type other: C{ArithmeticTuple} |
1261 |
""" |
1262 |
if len(self) != len(other): |
1263 |
raise ValueError,"tuple length must match." |
1264 |
for i in range(len(self)): |
1265 |
self.__items[i]+=other[i] |
1266 |
return self |
1267 |
|
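# A short sketch of how ArithmeticTuple is used by the solvers above: it keeps
# a velocity-like and a pressure-like quantity together while supporting the
# scaling and in-place updates the iteration schemes need.
def _example_arithmetic_tuple():
    x=ArithmeticTuple(numarray.array([1.,2.]),numarray.array([3.]))
    y=2.*x                      # itemwise scaling from the left
    y+=x                        # itemwise in-place add
    return y[0],y[1]            # -> array([3.,6.]), array([9.])
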
class HomogeneousSaddlePointProblem(object):
"""
This provides a framework for solving homogeneous saddle point problems of the form

Av+B^*p=f
Bv =0

for the unknowns v and p and given operators A and B and given right hand side f.
B^* is the adjoint operator of B with respect to the given inner product.

"""
def __init__(self,**kwargs): |
1280 |
self.setTolerance() |
1281 |
self.setToleranceReductionFactor() |
1282 |
|
1283 |
def initialize(self): |
1284 |
""" |
1285 |
initialize the problem (overwrite) |
1286 |
""" |
1287 |
pass |
1288 |
def B(self,v): |
1289 |
""" |
1290 |
returns Bv (overwrite) |
1291 |
@rtype: equal to the type of p |
1292 |
|
1293 |
@note: boundary conditions on p should be zero! |
1294 |
""" |
1295 |
pass |
1296 |
|
def inner(self,p0,p1):
"""
returns the inner product of two elements p0 and p1 (overwrite)

@type p0: equal to the type of p
@type p1: equal to the type of p
@rtype: C{float}
"""
pass |
1308 |
|
1309 |
def solve_A(self,u,p): |
1310 |
""" |
solves Av=f-Au-B^*p with accuracy self.getSubProblemTolerance() (overwrite)
|
1313 |
@rtype: equal to the type of v |
1314 |
@note: boundary conditions on v should be zero! |
1315 |
""" |
1316 |
pass |
1317 |
|
1318 |
def solve_prec(self,p): |
1319 |
""" |
provides a preconditioner for BA^{-1}B^* with accuracy self.getSubProblemTolerance() (overwrite)
|
1322 |
@rtype: equal to the type of p |
1323 |
""" |
1324 |
pass |
1325 |
|
1326 |
def stoppingcriterium(self,Bv,v,p): |
1327 |
""" |
returns True if the iteration is to be terminated. (overwrite)
|
1330 |
@rtype: C{bool} |
1331 |
""" |
1332 |
pass |
1333 |
|
1334 |
def __inner(self,p,r): |
1335 |
return self.inner(p,r[1]) |
1336 |
|
1337 |
def __inner_p(self,p1,p2): |
1338 |
return self.inner(p1,p2) |
1339 |
|
1340 |
def __stoppingcriterium(self,norm_r,r,p): |
1341 |
return self.stoppingcriterium(r[1],r[0],p) |
1342 |
|
1343 |
def __stoppingcriterium2(self,norm_r,norm_b,solver='GMRES',TOL=None): |
1344 |
return self.stoppingcriterium2(norm_r,norm_b,solver,TOL) |
1345 |
|
1346 |
def setTolerance(self,tolerance=1.e-8): |
1347 |
self.__tol=tolerance |
1348 |
def getTolerance(self): |
1349 |
return self.__tol |
1350 |
def setToleranceReductionFactor(self,reduction=0.01): |
1351 |
self.__reduction=reduction |
1352 |
def getSubProblemTolerance(self): |
1353 |
return self.__reduction*self.getTolerance() |
1354 |
|
1355 |
def solve(self,v,p,max_iter=20, verbose=False, show_details=False, solver='PCG',iter_restart=20): |
1356 |
""" |
1357 |
solves the saddle point problem using initial guesses v and p. |
1358 |
|
1359 |
@param max_iter: maximum number of iteration steps. |
1360 |
""" |
1361 |
self.verbose=verbose |
1362 |
self.show_details=show_details and self.verbose |
1363 |
|
1364 |
# assume p is known: then v=A^-1(f-B^*p) |
1365 |
# which leads to BA^-1B^*p = BA^-1f |
1366 |
|
1367 |
# Az=f is solved as A(z-v)=f-Av (z-v = 0 on fixed_u_mask) |
1368 |
|
1369 |
|
1370 |
self.__z=v+self.solve_A(v,p*0) |
1371 |
|
1372 |
Bz=self.B(self.__z) |
1373 |
# |
1374 |
# solve BA^-1B^*p = Bz |
1375 |
# |
1376 |
# note that the residual r=Bz-BA^-1B^*p = B(z-A^-1B^*p) = Bv |
1377 |
# |
1378 |
# with Av=Az-B^*p = f - B^*p (v=z on fixed_u_mask) |
1379 |
# A(v-z)=Az-B^*p-Az = f -Az - B^*p (v-z=0 on fixed_u_mask) |
1380 |
# |
1381 |
self.iter=0 |
1382 |
if solver=='GMRES': |
1383 |
if self.verbose: print "enter GMRES method (iter_max=%s)"%max_iter |
1384 |
p=GMRES(Bz,self.__Aprod2,self.__Msolve2,self.__inner_p,self.__stoppingcriterium2,iter_max=max_iter, x=p*1.,iter_restart=iter_restart) |
1385 |
# solve Au=f-B^*p |
1386 |
# A(u-v)=f-B^*p-Av |
1387 |
# u=v+(u-v) |
1388 |
u=v+self.solve_A(v,p) |
1389 |
|
1390 |
if solver=='NewtonGMRES': |
1391 |
if self.verbose: print "enter NewtonGMRES method (iter_max=%s)"%max_iter |
1392 |
p=NewtonGMRES(Bz,self.__Aprod_Newton,self.__Msolve2,self.__inner_p,self.__stoppingcriterium2,iter_max=max_iter, x=p*1.) |
1393 |
# solve Au=f-B^*p |
1394 |
# A(u-v)=f-B^*p-Av |
1395 |
# u=v+(u-v) |
1396 |
u=v+self.solve_A(v,p) |
1397 |
|
1398 |
|
1399 |
if solver=='TFQMR': |
1400 |
if self.verbose: print "enter TFQMR method (iter_max=%s)"%max_iter |
1401 |
p=TFQMR(Bz,self.__Aprod2,self.__Msolve2,self.__inner_p,self.__stoppingcriterium2,iter_max=max_iter, x=p*1.) |
1402 |
# solve Au=f-B^*p |
1403 |
# A(u-v)=f-B^*p-Av |
1404 |
# u=v+(u-v) |
1405 |
u=v+self.solve_A(v,p) |
1406 |
|
1407 |
if solver=='MINRES': |
1408 |
if self.verbose: print "enter MINRES method (iter_max=%s)"%max_iter |
1409 |
p=MINRES(Bz,self.__Aprod2,self.__Msolve2,self.__inner_p,self.__stoppingcriterium2,iter_max=max_iter, x=p*1.) |
1410 |
# solve Au=f-B^*p |
1411 |
# A(u-v)=f-B^*p-Av |
1412 |
# u=v+(u-v) |
1413 |
u=v+self.solve_A(v,p) |
1414 |
|
1415 |
if solver=='PCG': |
1416 |
if self.verbose: print "enter PCG method (iter_max=%s)"%max_iter |
1417 |
p,r=PCG(ArithmeticTuple(self.__z*1.,Bz),self.__Aprod,self.__Msolve,self.__inner,self.__stoppingcriterium,iter_max=max_iter, x=p) |
1418 |
u=r[0] |
1419 |
|
1420 |
print "RESULT div(u)=",util.Lsup(self.B(u)),util.Lsup(u) |
1421 |
|
1422 |
return u,p |
1423 |
|
1424 |
def __Msolve(self,r): |
1425 |
return self.solve_prec(r[1]) |
1426 |
|
1427 |
def __Msolve2(self,r): |
1428 |
return self.solve_prec(r) |
1429 |
|
1430 |
|
1431 |
def __Aprod(self,p): |
1432 |
# return BA^-1B*p |
1433 |
#solve Av =-B^*p as Av =f-Az-B^*p |
1434 |
v=self.solve_A(self.__z,-p) |
1435 |
return ArithmeticTuple(v, self.B(v)) |
1436 |
|
1437 |
def __Aprod2(self,p): |
1438 |
# return BA^-1B*p |
1439 |
#solve Av =-B^*p as Av =f-Az-B^*p |
1440 |
v=self.solve_A(self.__z,-p) |
1441 |
return self.B(v) |
1442 |
|
1443 |
def __Aprod_Newton(self,p): |
1444 |
# return BA^-1B*p |
1445 |
#solve Av =-B^*p as Av =f-Az-B^*p |
1446 |
v=self.solve_A(self.__z,-p) |
1447 |
return self.B(v-self.__z) |
1448 |
|
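# A hedged skeleton showing which methods a concrete problem has to overwrite
# before HomogeneousSaddlePointProblem.solve can be called; the bodies below are
# placeholders only and would typically be backed by LinearPDE objects.
class _ExampleStokesLikeProblem(HomogeneousSaddlePointProblem):
    def __init__(self,domain,**kwargs):
        HomogeneousSaddlePointProblem.__init__(self,**kwargs)
        self.domain=domain
    def B(self,v):
        return -util.div(v)                           # Bv; zero boundary conditions on p assumed
    def inner(self,p0,p1):
        return util.integrate(p0*p1)
    def solve_A(self,u,p):
        raise NotImplementedError("solve Av=f-Au-B^*p here, e.g. with a LinearPDE")
    def solve_prec(self,p):
        return p                                      # identity preconditioner as a placeholder
    def stoppingcriterium(self,Bv,v,p):
        return util.Lsup(Bv)<=self.getTolerance()*util.Lsup(v)
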
class SaddlePointProblem(object):
"""
This implements a solver for a saddle point problem

M{f(u,p)=0}
M{g(u)=0}

for u and p. The problem is solved with an inexact Uzawa scheme for p:

M{Q_f (u^{k+1}-u^{k}) = - f(u^{k},p^{k})}
M{Q_g (p^{k+1}-p^{k}) = g(u^{k+1})}

where Q_f is an approximation of the Jacobian A_f of f with respect to u and Q_g is an approximation of
A_g A_f^{-1} A_g, where A_g is the Jacobian of g with respect to p. As the construction of a 'proper'
Q_g can be difficult, a non-linear conjugate gradient method is applied to solve for p, so Q_g plays
in fact the role of a preconditioner.
"""
def __init__(self,verbose=False,*args): |
1467 |
""" |
1468 |
initializes the problem |
1469 |
|
1470 |
@param verbose: switches on the printing out some information |
1471 |
@type verbose: C{bool} |
1472 |
@note: this method may be overwritten by a particular saddle point problem |
1473 |
""" |
1474 |
if not isinstance(verbose,bool): |
1475 |
raise TypeError("verbose needs to be of type bool.") |
1476 |
self.__verbose=verbose |
1477 |
self.relaxation=1. |
1478 |
|
1479 |
def trace(self,text): |
1480 |
""" |
1481 |
prints text if verbose has been set |
1482 |
|
1483 |
@param text: a text message |
1484 |
@type text: C{str} |
1485 |
""" |
if self.__verbose: print "%s: %s"%(str(self),text)
|
1488 |
def solve_f(self,u,p,tol=1.e-8): |
1489 |
""" |
1490 |
solves |
1491 |
|
1492 |
A_f du = f(u,p) |
1493 |
|
with tolerance C{tol} and returns du. A_f is the Jacobian of f with respect to u.
|
1496 |
@param u: current approximation of u |
1497 |
@type u: L{escript.Data} |
1498 |
@param p: current approximation of p |
1499 |
@type p: L{escript.Data} |
1500 |
@param tol: tolerance expected for du |
1501 |
@type tol: C{float} |
1502 |
@return: increment du |
1503 |
@rtype: L{escript.Data} |
1504 |
@note: this method has to be overwritten by a particular saddle point problem |
1505 |
""" |
1506 |
pass |
1507 |
|
1508 |
def solve_g(self,u,tol=1.e-8): |
1509 |
""" |
1510 |
solves |
1511 |
|
1512 |
Q_g dp = g(u) |
1513 |
|
where Q_g is a preconditioner for A_g A_f^{-1} A_g and A_g is the Jacobian of g with respect to p.
|
1516 |
@param u: current approximation of u |
1517 |
@type u: L{escript.Data} |
1518 |
@param tol: tolerance expected for dp |
1519 |
@type tol: C{float} |
1520 |
@return: increment dp |
1521 |
@rtype: L{escript.Data} |
1522 |
@note: this method has to be overwritten by a particular saddle point problem |
1523 |
""" |
1524 |
pass |
1525 |
|
1526 |
def inner(self,p0,p1): |
1527 |
""" |
1528 |
inner product of p0 and p1 approximating p. Typically this returns integrate(p0*p1) |
1529 |
@return: inner product of p0 and p1 |
1530 |
@rtype: C{float} |
1531 |
""" |
1532 |
pass |
1533 |
|
1534 |
subiter_max=3 |
1535 |
def solve(self,u0,p0,tolerance=1.e-6,tolerance_u=None,iter_max=100,accepted_reduction=0.995,relaxation=None): |
1536 |
""" |
1537 |
runs the solver |
1538 |
|
1539 |
@param u0: initial guess for C{u} |
1540 |
@type u0: L{esys.escript.Data} |
1541 |
@param p0: initial guess for C{p} |
1542 |
@type p0: L{esys.escript.Data} |
1543 |
@param tolerance: tolerance for relative error in C{u} and C{p} |
1544 |
@type tolerance: positive C{float} |
1545 |
@param tolerance_u: tolerance for relative error in C{u} if different from C{tolerance} |
1546 |
@type tolerance_u: positive C{float} |
1547 |
@param iter_max: maximum number of iteration steps. |
1548 |
@type iter_max: C{int} |
@param accepted_reduction: if the norm of g cannot be reduced by the factor C{accepted_reduction}, backtracking is used to adapt the
relaxation factor. If C{accepted_reduction=None} no backtracking is used.
@type accepted_reduction: positive C{float} or C{None} |
1552 |
@param relaxation: initial relaxation factor. If C{relaxation==None}, the last relaxation factor is used. |
1553 |
@type relaxation: C{float} or C{None} |
1554 |
""" |
1555 |
tol=1.e-2 |
1556 |
if tolerance_u==None: tolerance_u=tolerance |
1557 |
if not relaxation==None: self.relaxation=relaxation |
1558 |
if accepted_reduction ==None: |
1559 |
angle_limit=0. |
1560 |
elif accepted_reduction>=1.: |
1561 |
angle_limit=0. |
1562 |
else: |
1563 |
angle_limit=util.sqrt(1-accepted_reduction**2) |
1564 |
self.iter=0 |
1565 |
u=u0 |
1566 |
p=p0 |
1567 |
# |
1568 |
# initialize things: |
1569 |
# |
1570 |
converged=False |
1571 |
# |
1572 |
# start loop: |
1573 |
# |
1574 |
# initial search direction is g |
1575 |
# |
1576 |
while not converged : |
1577 |
if self.iter>iter_max: |
1578 |
raise ArithmeticError("no convergence after %s steps."%self.iter) |
1579 |
f_new=self.solve_f(u,p,tol) |
1580 |
norm_f_new = util.Lsup(f_new) |
1581 |
u_new=u-f_new |
1582 |
g_new=self.solve_g(u_new,tol) |
1583 |
self.iter+=1 |
1584 |
norm_g_new = util.sqrt(self.inner(g_new,g_new)) |
1585 |
if norm_f_new==0. and norm_g_new==0.: return u, p |
1586 |
if self.iter>1 and not accepted_reduction==None: |
1587 |
# |
1588 |
# did we manage to reduce the norm of G? I |
1589 |
# if not we start a backtracking procedure |
1590 |
# |
1591 |
# print "new/old norm = ",norm_g_new, norm_g, norm_g_new/norm_g |
1592 |
if norm_g_new > accepted_reduction * norm_g: |
1593 |
sub_iter=0 |
1594 |
s=self.relaxation |
1595 |
d=g |
1596 |
g_last=g |
1597 |
self.trace(" start substepping: f = %s, g = %s, relaxation = %s."%(norm_f_new, norm_g_new, s)) |
1598 |
while sub_iter < self.subiter_max and norm_g_new > accepted_reduction * norm_g: |
1599 |
dg= g_new-g_last |
1600 |
norm_dg=abs(util.sqrt(self.inner(dg,dg))/self.relaxation) |
1601 |
rad=self.inner(g_new,dg)/self.relaxation |
1602 |
# print " ",sub_iter,": rad, norm_dg:",abs(rad), norm_dg*norm_g_new * angle_limit |
1603 |
# print " ",sub_iter,": rad, norm_dg:",rad, norm_dg, norm_g_new, norm_g |
1604 |
if abs(rad) < norm_dg*norm_g_new * angle_limit: |
1605 |
if sub_iter>0: self.trace(" no further improvements expected from backtracking.") |
1606 |
break |
1607 |
r=self.relaxation |
1608 |
self.relaxation= - rad/norm_dg**2 |
1609 |
s+=self.relaxation |
1610 |
##### |
1611 |
# a=g_new+self.relaxation*dg/r |
1612 |
# print "predicted new norm = ",util.sqrt(self.inner(a,a)),util.sqrt(self.inner(g_new,g_new)), self.relaxation |
1613 |
##### |
1614 |
g_last=g_new |
1615 |
p+=self.relaxation*d |
1616 |
f_new=self.solve_f(u,p,tol) |
1617 |
u_new=u-f_new |
1618 |
g_new=self.solve_g(u_new,tol) |
1619 |
self.iter+=1 |
1620 |
norm_f_new = util.Lsup(f_new) |
1621 |
norm_g_new = util.sqrt(self.inner(g_new,g_new)) |
1622 |
# print " ",sub_iter," new g norm",norm_g_new |
1623 |
self.trace(" %s th sub-step: f = %s, g = %s, relaxation = %s."%(sub_iter, norm_f_new, norm_g_new, s)) |
1624 |
# |
1625 |
# can we expect reduction of g? |
1626 |
# |
1627 |
# u_last=u_new |
1628 |
sub_iter+=1 |
1629 |
self.relaxation=s |
1630 |
# |
1631 |
# check for convergence: |
1632 |
# |
1633 |
norm_u_new = util.Lsup(u_new) |
1634 |
p_new=p+self.relaxation*g_new |
1635 |
norm_p_new = util.sqrt(self.inner(p_new,p_new)) |
1636 |
self.trace("%s th step: f = %s, f/u = %s, g = %s, g/p = %s, relaxation = %s."%(self.iter, norm_f_new ,norm_f_new/norm_u_new, norm_g_new, norm_g_new/norm_p_new, self.relaxation)) |
1637 |
|
1638 |
if self.iter>1: |
1639 |
dg2=g_new-g |
1640 |
df2=f_new-f |
1641 |
norm_dg2=util.sqrt(self.inner(dg2,dg2)) |
1642 |
norm_df2=util.Lsup(df2) |
1643 |
# print norm_g_new, norm_g, norm_dg, norm_p, tolerance |
1644 |
tol_eq_g=tolerance*norm_dg2/(norm_g*abs(self.relaxation))*norm_p_new |
1645 |
tol_eq_f=tolerance_u*norm_df2/norm_f*norm_u_new |
1646 |
if norm_g_new <= tol_eq_g and norm_f_new <= tol_eq_f: |
1647 |
converged=True |
1648 |
f, norm_f, u, norm_u, g, norm_g, p, norm_p = f_new, norm_f_new, u_new, norm_u_new, g_new, norm_g_new, p_new, norm_p_new |
1649 |
self.trace("convergence after %s steps."%self.iter) |
1650 |
return u,p |
1651 |
# def solve(self,u0,p0,tolerance=1.e-6,iter_max=10,self.relaxation=1.): |
1652 |
# tol=1.e-2 |
1653 |
# iter=0 |
1654 |
# converged=False |
1655 |
# u=u0*1. |
1656 |
# p=p0*1. |
1657 |
# while not converged and iter<iter_max: |
1658 |
# du=self.solve_f(u,p,tol) |
1659 |
# u-=du |
1660 |
# norm_du=util.Lsup(du) |
1661 |
# norm_u=util.Lsup(u) |
1662 |
# |
1663 |
# dp=self.relaxation*self.solve_g(u,tol) |
1664 |
# p+=dp |
1665 |
# norm_dp=util.sqrt(self.inner(dp,dp)) |
1666 |
# norm_p=util.sqrt(self.inner(p,p)) |
1667 |
# print iter,"-th step rel. errror u,p= (%s,%s) (%s,%s)(%s,%s)"%(norm_du,norm_dp,norm_du/norm_u,norm_dp/norm_p,norm_u,norm_p) |
1668 |
# iter+=1 |
1669 |
# |
1670 |
# converged = (norm_du <= tolerance*norm_u) and (norm_dp <= tolerance*norm_p) |
1671 |
# if converged: |
1672 |
# print "convergence after %s steps."%iter |
1673 |
# else: |
1674 |
# raise ArithmeticError("no convergence after %s steps."%iter) |
1675 |
# |
1676 |
# return u,p |
1677 |
|
def MaskFromBoundaryTag(function_space,*tags):
"""
creates a mask on the given function space which is one for samples
that touch the boundary tagged by tags.

usage: m=MaskFromBoundaryTag(Solution(domain),"left", "right")

@param function_space: a given function space
@type function_space: L{escript.FunctionSpace}
@param tags: boundary tags
@type tags: C{str}
@return: a mask which marks samples used by C{function_space} that are touching the
boundary tagged by any of the given tags.
@rtype: L{escript.Data} of rank 0
"""
pde=linearPDEs.LinearPDE(function_space.getDomain(),numEquations=1, numSolutions=1) |
1694 |
d=escript.Scalar(0.,escript.FunctionOnBoundary(function_space.getDomain())) |
1695 |
for t in tags: d.setTaggedValue(t,1.) |
1696 |
pde.setValue(y=d) |
1697 |
out=util.whereNonZero(pde.getRightHandSide()) |
1698 |
if out.getFunctionSpace() == function_space: |
1699 |
return out |
1700 |
else: |
1701 |
return util.whereNonZero(util.interpolate(out,function_space)) |
1702 |
|
1703 |
|
1704 |
|
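# A hedged sketch of the typical use of MaskFromBoundaryTag: building the
# constraint location q of a PDE from named pieces of the boundary.  The tag
# names "left" and "right" are assumptions about the example domain.
def _example_boundary_mask(domain):
    fs=escript.Solution(domain)
    q=MaskFromBoundaryTag(fs,"left","right")
    pde=linearPDEs.LinearPDE(domain)
    pde.setValue(A=util.kronecker(domain),Y=1.,q=q,r=0.)   # u=0 on the tagged boundary
    return pde.getSolution()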