Contents of /trunk/downunder/py_src/regularizations.py

Revision 4238
Thu Feb 21 09:58:49 2013 UTC by jfenwick
File MIME type: text/x-python
File size: 20534 byte(s)
fixed some relative imports and uses of xrange

##############################################################################
#
# Copyright (c) 2003-2013 by University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development since 2012 by School of Earth Sciences
#
##############################################################################

__copyright__="""Copyright (c) 2003-2013 by University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="https://launchpad.net/escript-finley"

__all__ = ['Regularization']

from .costfunctions import CostFunction

import numpy as np
from esys.escript import Function, outer, Data, Scalar, grad, inner, integrate, interpolate, kronecker, boundingBoxEdgeLengths, vol, sqrt, length
from esys.escript.linearPDEs import LinearPDE, IllegalCoefficientValue
from esys.escript.pdetools import ArithmeticTuple

import sys
if sys.version_info.major > 2:
    xrange = range

class Regularization(CostFunction):
    """
    The regularization term for the level set function ``m`` within the cost
    function J for an inversion:

    *J(m) = 1/2 * sum_k integrate( mu[k] * ( w0[k] * m_k**2 + w1[k,i] * m_{k,i}**2 ) ) + 1/2 * sum_{l<k} mu_c[l,k] * integrate( wc[l,k] * | grad(m_k) x grad(m_l) |^2 )*

    where w0[k], w1[k,i] and wc[l,k] are non-negative weighting factors and
    mu[k] and mu_c[l,k] are trade-off factors which may be altered
    during the inversion. The weighting factors are normalized such that their
    integrals over the domain are constant:

    *integrate(w0[k] + inner(w1[k,:],1/L[:]**2)) = scale[k] * volume(domain)*
    *integrate(wc[l,k]*1/L**4) = scale_c[l,k] * volume(domain)*

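    A regularization term for a single level set function on a 2D domain may
    be set up as sketched below; the domain, the weighting factor and the
    evaluation point are illustrative only::

        from esys.finley import Rectangle
        from esys.escript import Scalar, Solution
        from esys.downunder import Regularization
        import numpy as np

        domain = Rectangle(20, 20)
        reg = Regularization(domain, numLevelSets=1,
                             w1=np.ones((domain.getDim(),)))
        m = Scalar(0., Solution(domain))          # level set function
        J = reg.getValue(m, *reg.getArguments(m)) # regularization value
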
51 """
52 def __init__(self, domain, numLevelSets=1,
53 w0=None, w1=None, wc=None,
54 location_of_set_m=Data(),
55 useDiagonalHessianApproximation=False, tol=1e-8,
56 scale=None, scale_c=None):
57 """
58 initialization.
59
60 :param domain: domain
61 :type domain: `Domain`
62 :param numLevelSets: number of level sets
63 :type numLevelSets: ``int``
64 :param w0: weighting factor for the m**2 term. If not set zero is assumed.
65 :type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
66 (``numLevelSets`` ,) if ``numLevelSets`` > 1
67 :param w1: weighting factor for the grad(m_i) terms. If not set zero is assumed
68 :type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
69 (``numLevelSets`` , DIM) if ``numLevelSets`` > 1
70 :param wc: weighting factor for the cross gradient terms. If not set
71 zero is assumed. Used for the case if ``numLevelSets`` > 1
72 only. Only values ``wc[l,k]`` in the lower triangle (l<k)
73 are used.
74 :type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
75 :param location_of_set_m: marks location of zero values of the level
76 set function ``m`` by a positive entry.
77 :type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
78 object of shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
79 :param useDiagonalHessianApproximation: if True cross gradient terms
80 between level set components are ignored when calculating
81 approximations of the inverse of the Hessian Operator.
82 This can speed-up the calculation of the inverse but may
83 lead to an increase of the number of iteration steps in the
84 inversion.
85 :type useDiagonalHessianApproximation: ``bool``
86 :param tol: tolerance when solving the PDE for the inverse of the
87 Hessian Operator
88 :type tol: positive ``float``
89
90 :param scale: weighting factor for level set function variation terms. If not set one is used.
91 :type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
92 (``numLevelSets`` ,) if ``numLevelSets`` > 1
93 :param scale_c: scale for the cross gradient terms. If not set
94 one is assumed. Used for the case if ``numLevelSets`` > 1
95 only. Only values ``scale_c[l,k]`` in the lower triangle (l<k)
96 are used.
97 :type scale_c: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
98
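        For instance, with ``numLevelSets`` == 2 on a 3-dimensional domain,
        ``w0`` is expected to have shape (2,), ``w1`` shape (2, 3) and ``wc``
        shape (2, 2), of which only the entry ``wc[0,1]`` (l < k) is used.
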
99 """
100 if w0 == None and w1==None:
101 raise ValueError("Values for w0 or for w1 must be given.")
102 if wc == None and numLevelSets>1:
103 raise ValueError("Values for wc must be given.")
104
105 self.__domain=domain
106 DIM=self.__domain.getDim()
107 self.__numLevelSets=numLevelSets
108
109 self.__pde=LinearPDE(self.__domain, numEquations=self.__numLevelSets)
110 self.__pde.getSolverOptions().setTolerance(tol)
111 self.__pde.setSymmetryOn()
112 try:
113 self.__pde.setValue(q=location_of_set_m)
114 except IllegalCoefficientValue:
115 raise ValueError("Unable to set location of fixed level set function.")
116
        # =========== check the shape of the scales: ========================
        if scale is None:
            if numLevelSets == 1:
                scale = 1.
            else:
                scale = np.ones((numLevelSets,))
        else:
            scale=np.asarray(scale)
            if numLevelSets == 1:
                if scale.shape == ():
                    if not scale > 0:
                        raise ValueError("Value for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale."%(scale.shape,))
            else:
                if scale.shape == (numLevelSets,):
                    if not min(scale) > 0:
                        raise ValueError("All values for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale."%(scale.shape,))

        if scale_c is None or numLevelSets < 2:
            scale_c = np.ones((numLevelSets,numLevelSets))
        else:
            scale_c=np.asarray(scale_c)
            if scale_c.shape == (numLevelSets,numLevelSets):
                if not all(scale_c[l,k] > 0. for k in xrange(1,numLevelSets) for l in xrange(k)):
                    raise ValueError("All values in the lower triangle of scale_c must be positive.")
            else:
                raise ValueError("Unexpected shape %s for scale_c."%(scale_c.shape,))
        # ===== check the shape of the weights: =============================
        if w0 is not None:
            w0 = interpolate(w0,self.__pde.getFunctionSpaceForCoefficient('D'))
            s0=w0.getShape()
            if numLevelSets == 1:
                if not s0 == ():
                    raise ValueError("Unexpected shape %s for weight w0."%(s0,))
            else:
                if not s0 == (numLevelSets,):
                    raise ValueError("Unexpected shape %s for weight w0."%(s0,))

        if w1 is not None:
            w1 = interpolate(w1,self.__pde.getFunctionSpaceForCoefficient('A'))
            s1=w1.getShape()
            if numLevelSets == 1:
                if not s1 == (DIM,):
                    raise ValueError("Unexpected shape %s for weight w1."%(s1,))
            else:
                if not s1 == (numLevelSets,DIM):
                    raise ValueError("Unexpected shape %s for weight w1."%(s1,))

        if numLevelSets == 1:
            wc=None
        else:
            wc = interpolate(wc,self.__pde.getFunctionSpaceForCoefficient('A'))
            sc=wc.getShape()
            if not sc == (numLevelSets, numLevelSets):
                raise ValueError("Unexpected shape %s for weight wc."%(sc,))
        # ============= now we rescale weights: =============================
        L2s=np.asarray(boundingBoxEdgeLengths(domain))**2
        L4=1/np.sum(1/L2s)**2
        if numLevelSets == 1:
            A=0
            if w0 is not None:
                A = integrate(w0)
            if w1 is not None:
                A += integrate(inner(w1, 1/L2s))
            if A > 0:
                f = scale/A
                if w0 is not None:
                    w0*=f
                if w1 is not None:
                    w1*=f
            else:
                raise ValueError("Non-positive weighting factor detected.")
        else:
            for k in xrange(numLevelSets):
                A=0
                if w0 is not None:
                    A = integrate(w0[k])
                if w1 is not None:
                    A += integrate(inner(w1[k,:], 1/L2s))
                if A > 0:
                    f = scale[k]/A
                    if w0 is not None:
                        w0[k]*=f
                    if w1 is not None:
                        w1[k,:]*=f
                else:
                    raise ValueError("Non-positive weighting factor for level set component %d detected."%k)

                # and now the cross-gradient:
                if wc is not None:
                    for l in xrange(k):
                        A = integrate(wc[l,k])/L4
                        if A > 0:
                            f = scale_c[l,k]/A
                            wc[l,k]*=f
                        #else:
                        #    raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected."%(l,k))

        self.__w0=w0
        self.__w1=w1
        self.__wc=wc

        self.__pde_is_set=False
        if self.__numLevelSets > 1:
            self.__useDiagonalHessianApproximation=useDiagonalHessianApproximation
        else:
            self.__useDiagonalHessianApproximation=True
        self._update_Hessian=True

        # total number of trade-off factors: one per level set function plus one
        # per pair l<k of level set functions (integer division keeps this an int
        # under Python 3)
        self.__num_tradeoff_factors=numLevelSets+((numLevelSets-1)*numLevelSets)//2
        self.setTradeOffFactors()
        self.__vol_d=vol(self.__domain)

    def getDomain(self):
        """
        returns the domain of the regularization term

        :rtype: ``Domain``
        """
        return self.__domain

    def getNumLevelSets(self):
        """
        returns the number of level set functions

        :rtype: ``int``
        """
        return self.__numLevelSets

    def getPDE(self):
        """
        returns the linear PDE to be solved for the Hessian Operator inverse

        :rtype: `LinearPDE`
        """
        return self.__pde

    def getDualProduct(self, m, r):
        """
        returns the dual product of a gradient represented by X=r[1] and Y=r[0]
        with a level set function m:

        *integrate( Y_i * m_i ) + integrate( X_ij * m_{i,j} )*

        :type m: `Data`
        :type r: `ArithmeticTuple`
        :rtype: ``float``
        """
        A=0
        if not r[0].isEmpty(): A+=integrate(inner(r[0], m))
        if not r[1].isEmpty(): A+=integrate(inner(r[1], grad(m)))
        return A

    def getNumTradeOffFactors(self):
        """
        returns the number of trade-off factors being used.

        :rtype: ``int``
        """
        return self.__num_tradeoff_factors

    def setTradeOffFactors(self, mu=None):
        """
        sets the trade-off factors for the level-set variation and the
        cross-gradient terms.

        :param mu: new values for the trade-off factors where the values
                   mu[:numLevelSets] are the trade-off factors for the
                   level-set variation and the remaining values are for the
                   cross-gradient part with
                   mu_c[l,k]=mu[numLevelSets+l+((k-1)*k)/2] (l<k).
                   If no values for mu are given ones are used.
                   Values must be positive.
        :type mu: ``list`` of ``float`` or ``numpy.array``
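
        For example, for ``numLevelSets`` == 3 there are six trade-off
        factors: ``mu[0:3]`` are the factors for the level-set variation and
        the cross-gradient factors are laid out as::

            mu[3] = mu_c[0,1],   mu[4] = mu_c[0,2],   mu[5] = mu_c[1,2]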
290 """
291 numLS=self.getNumLevelSets()
292 numTF=self.getNumTradeOffFactors()
293 if mu is None:
294 mu = np.ones((numTF,))
295 else:
296 mu = np.asarray(mu)
297
298 if mu.shape == (numTF,):
299 self.setTradeOffFactorsForVariation(mu[:numLS])
300 mu_c2=np.zeros((numLS,numLS))
301 for k in xrange(numLS):
302 for l in xrange(k):
303 mu_c2[l,k] = mu[numLS+l+((k-1)*k)/2]
304 self.setTradeOffFactorsForCrossGradient(mu_c2)
305 elif mu.shape == () and numLS ==1:
306 self.setTradeOffFactorsForVariation(mu)
307 else:
308 raise ValueError("Unexpected shape %s for mu."%(mu.shape,))
309
    def setTradeOffFactorsForVariation(self, mu=None):
        """
        sets the trade-off factors for the level-set variation part.

        :param mu: new values for the trade-off factors. Values must be positive.
        :type mu: ``float``, ``list`` of ``float`` or ``numpy.array``
        """
        numLS=self.getNumLevelSets()
        if mu is None:
            if numLS == 1:
                mu = 1.
            else:
                mu = np.ones((numLS,))

        mu=np.asarray(mu)
        if numLS == 1:
            if mu.shape == (1,): mu=mu[0]
            if mu.shape == ():
                if mu > 0:
                    self.__mu = mu
                    self._new_mu = True
                else:
                    raise ValueError("Value for trade-off factor must be positive.")
            else:
                raise ValueError("Unexpected shape %s for mu."%(mu.shape,))
        else:
            if mu.shape == (numLS,):
                if min(mu) > 0:
                    self.__mu = mu
                    self._new_mu = True
                else:
                    raise ValueError("All values for mu must be positive.")
            else:
                raise ValueError("Unexpected shape %s for trade-off factors."%(mu.shape,))

    def setTradeOffFactorsForCrossGradient(self, mu_c=None):
        """
        sets the trade-off factors for the cross-gradient terms.

        :param mu_c: new values for the trade-off factors for the cross-gradient
                     terms. Values must be positive. If no value is given ones
                     are used. Only values mu_c[l,k] for l<k are used.
        :type mu_c: ``float``, ``list`` of ``float`` or ``numpy.array``
        """
        numLS=self.getNumLevelSets()
        if mu_c is None or numLS < 2:
            self.__mu_c = np.ones((numLS,numLS))
        elif isinstance(mu_c, float) or isinstance(mu_c, int):
            self.__mu_c = np.zeros((numLS,numLS))
            self.__mu_c[:,:]=mu_c
        else:
            mu_c=np.asarray(mu_c)
            if mu_c.shape == (numLS,numLS):
                if not all(mu_c[l,k] > 0. for k in xrange(1,numLS) for l in xrange(k)):
                    raise ValueError("All trade-off factors in the lower triangle of mu_c must be positive.")
                else:
                    self.__mu_c = mu_c
                    self._new_mu = True
            else:
                raise ValueError("Unexpected shape %s for mu_c."%(mu_c.shape,))

    def getArguments(self, m):
        """
        returns the precalculated arguments for the evaluation of the cost
        function and its gradient for the level set function ``m``, here the
        gradient ``grad(m)``.

        :rtype: ``tuple`` of `Data`
        """
        return ( grad(m),)

    def getValue(self, m, grad_m):
        """
        returns the value of the cost function J with respect to m.

        :rtype: ``float``
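
        For example, ``reg.getValue(m, *reg.getArguments(m))`` evaluates the
        regularization term for a level set function ``m``, where ``reg`` is
        this `Regularization` object.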
381 """
382 mu=self.__mu
383 mu_c=self.__mu_c
384 DIM=self.getDomain().getDim()
385 numLS=self.getNumLevelSets()
386
387 A=0
388 if self.__w0 is not None:
389 A+=inner(integrate(m**2 * self.__w0), mu)
390
391 if self.__w1 is not None:
392 if numLS == 1:
393 A+=integrate(inner(grad_m**2, self.__w1))*mu
394 else:
395 for k in xrange(numLS):
396 A+=mu[k]*integrate(inner(grad_m[k,:]**2,self.__w1[k,:]))
397
398 if numLS > 1:
399 for k in xrange(numLS):
400 gk=grad_m[k,:]
401 len_gk=length(gk)
402 for l in xrange(k):
403 gl=grad_m[l,:]
404 A+= mu_c[l,k] * integrate( self.__wc[l,k] * ( len_gk * length(gl) )**2 - inner(gk, gl)**2 )
405 return A/2
406
    def getGradient(self, m, grad_m):
        """
        returns the gradient of the cost function J with respect to m.
        The function returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_kj.
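
        For the weighting terms these are *Y_k = mu[k] * w0[k] * m_k* and
        *X_kj = mu[k] * w1[k,j] * m_{k,j}*; with more than one level set
        function, cross-gradient contributions are added to *X*.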
411 """
412
413 mu=self.__mu
414 mu_c=self.__mu_c
415 DIM=self.getDomain().getDim()
416 numLS=self.getNumLevelSets()
417
418 grad_m=grad(m, Function(m.getDomain()))
419 if self.__w0 is not None:
420 Y = m * self.__w0 * mu
421 else:
422 if numLS == 1:
423 Y = Scalar(0, grad_m.getFunctionSpace())
424 else:
425 Y = Data(0, (numLS,) , grad_m.getFunctionSpace())
426
427 if self.__w1 is not None:
428 X=grad_m*self.__w1
429 if numLS == 1:
430 X=grad_m* self.__w1*mu
431 else:
432 for k in xrange(numLS):
433 X[k,:]*=mu[k]
434 else:
435 X = Data(0, grad_m.getShape(), grad_m.getFunctionSpace())
436
437 # cross gradient terms:
438 if numLS > 1:
439 for k in xrange(numLS):
440 grad_m_k=grad_m[k,:]
441 l2_grad_m_k = length(grad_m_k)**2
442 for l in xrange(k):
443 grad_m_l=grad_m[l,:]
444 l2_grad_m_l = length(grad_m_l)**2
445 grad_m_lk = inner(grad_m_l, grad_m_k)
446 f= mu_c[l,k]* self.__wc[l,k]
447 X[l,:] += f * ( l2_grad_m_l * grad_m_l - grad_m_lk * grad_m_k )
448 X[k,:] += f * ( l2_grad_m_k * grad_m_k - grad_m_lk * grad_m_l )
449
450 return ArithmeticTuple(Y, X)
451
    def getInverseHessianApproximation(self, m, r, grad_m):
        """
        returns the result of applying the approximate inverse of the Hessian
        operator to a gradient ``r`` by solving the underlying PDE with the
        right hand side coefficients Y=r[0] and X=r[1]. The PDE coefficients
        are reassembled when the trade-off factors have changed or an update
        of the Hessian has been requested via `updateHessian`.
        """
        if self._new_mu or self._update_Hessian:
            self._new_mu=False
            self._update_Hessian=False
            mu=self.__mu
            mu_c=self.__mu_c

            DIM=self.getDomain().getDim()
            numLS=self.getNumLevelSets()
            if self.__w0 is not None:
                if numLS == 1:
                    D=self.__w0 * mu
                else:
                    D=self.getPDE().createCoefficient("D")
                    for k in xrange(numLS): D[k,k]=self.__w0[k] * mu[k]
                self.getPDE().setValue(D=D)

            A=self.getPDE().createCoefficient("A")
            if self.__w1 is not None:
                if numLS == 1:
                    for i in xrange(DIM): A[i,i]=self.__w1[i] * mu
                else:
                    for k in xrange(numLS):
                        for i in xrange(DIM): A[k,i,k,i]=self.__w1[k,i] * mu[k]

            if numLS > 1:
                for k in xrange(numLS):
                    grad_m_k=grad_m[k,:]
                    l2_grad_m_k = length(grad_m_k)**2
                    o_kk=outer(grad_m_k, grad_m_k)
                    for l in xrange(k):
                        grad_m_l=grad_m[l,:]
                        l2_grad_m_l = length(grad_m_l)**2
                        i_lk = inner(grad_m_l, grad_m_k)
                        o_lk = outer(grad_m_l, grad_m_k)
                        o_kl = outer(grad_m_k, grad_m_l)
                        o_ll = outer(grad_m_l, grad_m_l)
                        f= mu_c[l,k]* self.__wc[l,k]

                        A[l,:,l,:] += f * ( l2_grad_m_k * kronecker(DIM) - o_kk )
                        A[l,:,k,:] += f * ( 2 * o_lk - o_kl - i_lk * kronecker(DIM) )
                        A[k,:,l,:] += f * ( 2 * o_kl - o_lk - i_lk * kronecker(DIM) )
                        A[k,:,k,:] += f * ( l2_grad_m_l * kronecker(DIM) - o_ll )
            self.getPDE().setValue(A=A)
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(X=r[1])
        #print "X only: ",self.getPDE().getSolution()
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(Y=r[0])
        #print "Y only: ",self.getPDE().getSolution()

        self.getPDE().resetRightHandSideCoefficients()
        self.getPDE().setValue(X=r[1], Y=r[0])
        return self.getPDE().getSolution()

    def updateHessian(self):
        """
        notify the class to recalculate the Hessian operator
        """
        if not self.__useDiagonalHessianApproximation:
            self._update_Hessian=True

    def getNorm(self, m):
        """
        returns the norm of ``m``

        :param m: level set function
        :type m: `Data`
        :rtype: ``float``
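
        The norm is calculated as *sqrt( integrate( length(m)**2 ) / vol(domain) )*.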
524 """
525 return sqrt(integrate(length(m)**2)/self.__vol_d)
526
