Contents of /trunk/downunder/py_src/regularizations.py

Revision 4213 - Tue Feb 19 01:16:29 2013 UTC by caltinay
File MIME type: text/x-python
File size: 20477 byte(s)
Some cleanup and more consistent logging.

##############################################################################
#
# Copyright (c) 2003-2013 by University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development since 2012 by School of Earth Sciences
#
##############################################################################

__copyright__="""Copyright (c) 2003-2013 by University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="https://launchpad.net/escript-finley"

__all__ = ['Regularization']

from costfunctions import CostFunction

import numpy as np
from esys.escript import Function, outer, Data, Scalar, grad, inner, integrate, interpolate, kronecker, boundingBoxEdgeLengths, vol, sqrt, length
from esys.escript.linearPDEs import LinearPDE, IllegalCoefficientValue
from esys.escript.pdetools import ArithmeticTuple

class Regularization(CostFunction):
    """
    The regularization term for the level set function ``m`` within the cost
    function J for an inversion:

    *J(m) = 1/2 * sum_k mu[k] * integrate( w0[k]*m_k**2 + w1[k,i]*m_{k,i}**2 ) + 1/2 * sum_{l<k} mu_c[l,k] * integrate( wc[l,k] * | grad(m_k) x grad(m_l) |^2 )*

    where w0[k], w1[k,i] and wc[l,k] are non-negative weighting factors and
    mu[k] and mu_c[l,k] are trade-off factors which may be altered
    during the inversion. The weighting factors are normalized such that their
    integrals over the domain are constant:

    *integrate(w0[k] + inner(w1[k,:], 1/L[:]**2)) = scale[k] * volume(domain)*
    *integrate(wc[l,k] * 1/L**4) = scale_c[l,k] * volume(domain)*

    """
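    # Illustrative usage sketch (not part of the original module). The finley
    # Rectangle domain, the weight values and the package-level import of
    # Regularization below are assumptions chosen only for the example:
    #
    #     from esys.finley import Rectangle
    #     from esys.downunder import Regularization
    #     dom = Rectangle(n0=20, n1=20)
    #     # one level set function, gradient smoothing only (w1 has shape (DIM,)):
    #     reg = Regularization(dom, numLevelSets=1, w1=[1., 1.])
    #     reg.setTradeOffFactors([10.])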
    def __init__(self, domain, numLevelSets=1,
                 w0=None, w1=None, wc=None,
                 location_of_set_m=Data(),
                 useDiagonalHessianApproximation=False, tol=1e-8,
                 scale=None, scale_c=None):
        """
        initialization.

        :param domain: domain
        :type domain: `Domain`
        :param numLevelSets: number of level sets
        :type numLevelSets: ``int``
        :param w0: weighting factor for the m**2 term. If not set, zero is assumed.
        :type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param w1: weighting factor for the grad(m_i) terms. If not set, zero is assumed.
        :type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` , DIM) if ``numLevelSets`` > 1
        :param wc: weighting factor for the cross-gradient terms. If not set,
                   zero is assumed. Used only if ``numLevelSets`` > 1.
                   Only values ``wc[l,k]`` in the lower triangle (l<k)
                   are used.
        :type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
        :param location_of_set_m: marks the location of zero values of the level
                   set function ``m`` by a positive entry.
        :type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
                   object of shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param useDiagonalHessianApproximation: if True, cross-gradient terms
                   between level set components are ignored when calculating
                   approximations of the inverse of the Hessian operator.
                   This can speed up the calculation of the inverse but may
                   lead to an increase of the number of iteration steps in the
                   inversion.
        :type useDiagonalHessianApproximation: ``bool``
        :param tol: tolerance when solving the PDE for the inverse of the
                    Hessian operator
        :type tol: positive ``float``

        :param scale: weighting factor for the level set function variation
                    terms. If not set, one is used.
        :type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
                    (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param scale_c: scale for the cross-gradient terms. If not set,
                    one is assumed. Used only if ``numLevelSets`` > 1.
                    Only values ``scale_c[l,k]`` in the lower triangle (l<k)
                    are used.
        :type scale_c: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
        """
        if w0 is None and w1 is None:
            raise ValueError("Values for w0 or for w1 must be given.")
        if wc is None and numLevelSets > 1:
            raise ValueError("Values for wc must be given.")

        self.__domain=domain
        DIM=self.__domain.getDim()
        self.__numLevelSets=numLevelSets

        self.__pde=LinearPDE(self.__domain, numEquations=self.__numLevelSets)
        self.__pde.getSolverOptions().setTolerance(tol)
        self.__pde.setSymmetryOn()
        try:
            self.__pde.setValue(q=location_of_set_m)
        except IllegalCoefficientValue:
            raise ValueError("Unable to set location of fixed level set function.")

        # =========== check the shape of the scales: ========================
        if scale is None:
            if numLevelSets == 1:
                scale = 1.
            else:
                scale = np.ones((numLevelSets,))
        else:
            scale = np.asarray(scale)
            if numLevelSets == 1:
                if scale.shape == ():
                    if not scale > 0:
                        raise ValueError("Value for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale."%(scale.shape,))
            else:
                if scale.shape == (numLevelSets,):
                    if not min(scale) > 0:
                        raise ValueError("All values for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale."%(scale.shape,))

        if scale_c is None or numLevelSets < 2:
            scale_c = np.ones((numLevelSets, numLevelSets))
        else:
            scale_c = np.asarray(scale_c)
            if scale_c.shape == (numLevelSets, numLevelSets):
                if not all( [ scale_c[l,k] > 0. for k in xrange(1, numLevelSets) for l in xrange(k) ] ):
                    raise ValueError("All values in the lower triangle of scale_c must be positive.")
            else:
                raise ValueError("Unexpected shape %s for scale_c."%(scale_c.shape,))
        # ===== check the shape of the weights: =============================
        if w0 is not None:
            w0 = interpolate(w0, self.__pde.getFunctionSpaceForCoefficient('D'))
            s0 = w0.getShape()
            if numLevelSets == 1:
                if not s0 == ():
                    raise ValueError("Unexpected shape %s for weight w0."%(s0,))
            else:
                if not s0 == (numLevelSets,):
                    raise ValueError("Unexpected shape %s for weight w0."%(s0,))

        if w1 is not None:
            w1 = interpolate(w1, self.__pde.getFunctionSpaceForCoefficient('A'))
            s1 = w1.getShape()
            if numLevelSets == 1:
                if not s1 == (DIM,):
                    raise ValueError("Unexpected shape %s for weight w1."%(s1,))
            else:
                if not s1 == (numLevelSets, DIM):
                    raise ValueError("Unexpected shape %s for weight w1."%(s1,))

        if numLevelSets == 1:
            wc = None
        else:
            wc = interpolate(wc, self.__pde.getFunctionSpaceForCoefficient('A'))
            sc = wc.getShape()
            if not sc == (numLevelSets, numLevelSets):
                raise ValueError("Unexpected shape %s for weight wc."%(sc,))
        # ============= now we rescale weights: =============================
        L2s = np.asarray(boundingBoxEdgeLengths(domain))**2
        L4 = 1/np.sum(1/L2s)**2
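        # L2s holds the squared edge lengths of the domain's bounding box and
        # L4 a combined fourth-power length scale, 1/(sum(1/L**2))**2. They are
        # used below to make the w0, w1 and wc contributions comparable before
        # the scale/scale_c normalization is applied.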
        if numLevelSets == 1:
            A=0
            if w0 is not None:
                A = integrate(w0)
            if w1 is not None:
                A += integrate(inner(w1, 1/L2s))
            if A > 0:
                f = scale/A
                if w0 is not None:
                    w0*=f
                if w1 is not None:
                    w1*=f
            else:
                raise ValueError("Non-positive weighting factor detected.")
        else:
            for k in xrange(numLevelSets):
                A=0
                if w0 is not None:
                    A = integrate(w0[k])
                if w1 is not None:
                    A += integrate(inner(w1[k,:], 1/L2s))
                if A > 0:
                    f = scale[k]/A
                    if w0 is not None:
                        w0[k]*=f
                    if w1 is not None:
                        w1[k,:]*=f
                else:
                    raise ValueError("Non-positive weighting factor for level set component %d detected."%k)

                # and now the cross-gradient:
                if wc is not None:
                    for l in xrange(k):
                        A = integrate(wc[l,k])/L4
                        if A > 0:
                            f = scale_c[l,k]/A
                            wc[l,k]*=f
                        #else:
                        #    raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected."%(l,k))

        self.__w0=w0
        self.__w1=w1
        self.__wc=wc

        self.__pde_is_set=False
        if self.__numLevelSets > 1:
            self.__useDiagonalHessianApproximation=useDiagonalHessianApproximation
        else:
            self.__useDiagonalHessianApproximation=True
        self._update_Hessian=True

        self.__num_tradeoff_factors=numLevelSets+((numLevelSets-1)*numLevelSets)/2
        self.setTradeOffFactors()
        self.__vol_d=vol(self.__domain)

    def getDomain(self):
        """
        returns the domain of the regularization term

        :rtype: ``Domain``
        """
        return self.__domain

    def getNumLevelSets(self):
        """
        returns the number of level set functions

        :rtype: ``int``
        """
        return self.__numLevelSets

    def getPDE(self):
        """
        returns the linear PDE to be solved for the Hessian operator inverse

        :rtype: `LinearPDE`
        """
        return self.__pde

    def getDualProduct(self, m, r):
        """
        returns the dual product of a gradient represented by X=r[1] and Y=r[0]
        with a level set function m:

        *Y_i*m_i + X_ij*m_{i,j}*

        :type m: `Data`
        :type r: `ArithmeticTuple`
        :rtype: ``float``
        """
        A=0
        if not r[0].isEmpty(): A+=integrate(inner(r[0], m))
        if not r[1].isEmpty(): A+=integrate(inner(r[1], grad(m)))
        return A

    def getNumTradeOffFactors(self):
        """
        returns the number of trade-off factors being used.

        :rtype: ``int``
        """
        return self.__num_tradeoff_factors

    def setTradeOffFactors(self, mu=None):
        """
        sets the trade-off factors for the level-set variation and the
        cross-gradient terms.

        :param mu: new values for the trade-off factors, where the values
                   mu[:numLevelSets] are the trade-off factors for the
                   level-set variation and the remaining values are for the
                   cross-gradient part, with
                   mu_c[l,k]=mu[numLevelSets+l+((k-1)*k)/2] (l<k).
                   If no values for mu are given, ones are used. Values must
                   be positive.
        :type mu: ``list`` of ``float`` or ``numpy.array``
        """
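        # Worked example of the flat ordering used below (derived from the
        # index formula mu_c[l,k] = mu[numLS+l+((k-1)*k)/2]): for
        # numLevelSets=3 there are 6 trade-off factors; mu[0:3] are the
        # variation factors and mu[3]=mu_c[0,1], mu[4]=mu_c[0,2],
        # mu[5]=mu_c[1,2].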
        numLS=self.getNumLevelSets()
        numTF=self.getNumTradeOffFactors()
        if mu is None:
            mu = np.ones((numTF,))
        else:
            mu = np.asarray(mu)

        if mu.shape == (numTF,):
            self.setTradeOffFactorsForVariation(mu[:numLS])
            mu_c2=np.zeros((numLS, numLS))
            for k in xrange(numLS):
                for l in xrange(k):
                    mu_c2[l,k] = mu[numLS+l+((k-1)*k)/2]
            self.setTradeOffFactorsForCrossGradient(mu_c2)
        elif mu.shape == () and numLS == 1:
            self.setTradeOffFactorsForVariation(mu)
        else:
            raise ValueError("Unexpected shape %s for mu."%(mu.shape,))

    def setTradeOffFactorsForVariation(self, mu=None):
        """
        sets the trade-off factors for the level-set variation part.

        :param mu: new values for the trade-off factors. Values must be positive.
        :type mu: ``float``, ``list`` of ``float`` or ``numpy.array``
        """
        numLS=self.getNumLevelSets()
        if mu is None:
            if numLS == 1:
                mu = 1.
            else:
                mu = np.ones((numLS,))

        mu=np.asarray(mu)
        if numLS == 1:
            if mu.shape == (1,): mu=mu[0]
            if mu.shape == ():
                if mu > 0:
                    self.__mu = mu
                    self._new_mu=True
                else:
                    raise ValueError("Value for trade-off factor must be positive.")
            else:
                raise ValueError("Unexpected shape %s for mu."%(mu.shape,))
        else:
            if mu.shape == (numLS,):
                if min(mu) > 0:
                    self.__mu = mu
                    self._new_mu=True
                else:
                    raise ValueError("All values for mu must be positive.")
            else:
                raise ValueError("Unexpected shape %s for trade-off factor."%(mu.shape,))

    def setTradeOffFactorsForCrossGradient(self, mu_c=None):
        """
        sets the trade-off factors for the cross-gradient terms.

        :param mu_c: new values for the trade-off factors for the cross-gradient
                     terms. Values must be positive. If no value is given, ones
                     are used. Only values mu_c[l,k] for l<k are used.
        :type mu_c: ``float``, ``list`` of ``float`` or ``numpy.array``
        """
        numLS=self.getNumLevelSets()
        if mu_c is None or numLS < 2:
            self.__mu_c = np.ones((numLS, numLS))
        elif isinstance(mu_c, float) or isinstance(mu_c, int):
            self.__mu_c = np.zeros((numLS, numLS))
            self.__mu_c[:,:]=mu_c
        else:
            mu_c=np.asarray(mu_c)
            if mu_c.shape == (numLS, numLS):
                if not all( [ mu_c[l,k] > 0. for k in xrange(1, numLS) for l in xrange(k) ] ):
                    raise ValueError("All trade-off factors in the lower triangle of mu_c must be positive.")
                else:
                    self.__mu_c = mu_c
                    self._new_mu=True
            else:
                raise ValueError("Unexpected shape %s for mu_c."%(mu_c.shape,))

    def getArguments(self, m):
        """
        returns the pre-computed values shared by `getValue()` and
        `getGradient()`, i.e. the gradient of the level set function ``m``.
        """
        return ( grad(m),)

    def getValue(self, m, grad_m):
        """
        returns the value of the cost function J with respect to m.

        :rtype: ``float``
        """
        mu=self.__mu
        mu_c=self.__mu_c
        DIM=self.getDomain().getDim()
        numLS=self.getNumLevelSets()

        A=0
        if self.__w0 is not None:
            A+=inner(integrate(m**2 * self.__w0), mu)

        if self.__w1 is not None:
            if numLS == 1:
                A+=integrate(inner(grad_m**2, self.__w1))*mu
            else:
                for k in xrange(numLS):
                    A+=mu[k]*integrate(inner(grad_m[k,:]**2, self.__w1[k,:]))

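        # Cross-gradient contribution. The identity
        #   |grad(m_k) x grad(m_l)|^2 = |grad(m_k)|^2*|grad(m_l)|^2 - (grad(m_k).grad(m_l))^2
        # is used below so the cross product itself never has to be formed.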
        if numLS > 1:
            for k in xrange(numLS):
                gk=grad_m[k,:]
                len_gk=length(gk)
                for l in xrange(k):
                    gl=grad_m[l,:]
                    A+= mu_c[l,k] * integrate( self.__wc[l,k] * ( ( len_gk * length(gl) )**2 - inner(gk, gl)**2 ) )
        return A/2

    def getGradient(self, m, grad_m):
        """
        returns the gradient of the cost function J with respect to m.
        The function returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_kj.
        """

        mu=self.__mu
        mu_c=self.__mu_c
        DIM=self.getDomain().getDim()
        numLS=self.getNumLevelSets()

        grad_m=grad(m, Function(m.getDomain()))
        if self.__w0 is not None:
            Y = m * self.__w0 * mu
        else:
            if numLS == 1:
                Y = Scalar(0, grad_m.getFunctionSpace())
            else:
                Y = Data(0, (numLS,), grad_m.getFunctionSpace())

        if self.__w1 is not None:
            if numLS == 1:
                X = grad_m * self.__w1 * mu
            else:
                X = grad_m * self.__w1
                for k in xrange(numLS):
                    X[k,:]*=mu[k]
        else:
            X = Data(0, grad_m.getShape(), grad_m.getFunctionSpace())

        # cross gradient terms:
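        # For each pair l<k the term is f/2*(|grad m_l|^2*|grad m_k|^2 - (grad m_l . grad m_k)^2)
        # with f = mu_c[l,k]*wc[l,k]; differentiating with respect to grad m_l
        # and grad m_k gives the two X updates below.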
        if numLS > 1:
            for k in xrange(numLS):
                grad_m_k=grad_m[k,:]
                l2_grad_m_k = length(grad_m_k)**2
                for l in xrange(k):
                    grad_m_l=grad_m[l,:]
                    l2_grad_m_l = length(grad_m_l)**2
                    grad_m_lk = inner(grad_m_l, grad_m_k)
                    f = mu_c[l,k] * self.__wc[l,k]
                    X[l,:] += f * ( l2_grad_m_k * grad_m_l - grad_m_lk * grad_m_k )
                    X[k,:] += f * ( l2_grad_m_l * grad_m_k - grad_m_lk * grad_m_l )

        return ArithmeticTuple(Y, X)

    def getInverseHessianApproximation(self, m, r, grad_m):
        """
        returns an approximation of the inverse Hessian operator applied to
        the gradient ``r``, obtained by solving the regularization PDE whose
        coefficients are built from the current weights and trade-off factors.
        """
        if self._new_mu or self._update_Hessian:

            self._new_mu=False
            self._update_Hessian=False
            mu=self.__mu
            mu_c=self.__mu_c

            DIM=self.getDomain().getDim()
            numLS=self.getNumLevelSets()
            if self.__w0 is not None:
                if numLS == 1:
                    D=self.__w0 * mu
                else:
                    D=self.getPDE().createCoefficient("D")
                    for k in xrange(numLS): D[k,k]=self.__w0[k] * mu[k]
                self.getPDE().setValue(D=D)

            A=self.getPDE().createCoefficient("A")
            if self.__w1 is not None:
                if numLS == 1:
                    for i in xrange(DIM): A[i,i]=self.__w1[i] * mu
                else:
                    for k in xrange(numLS):
                        for i in xrange(DIM): A[k,i,k,i]=self.__w1[k,i] * mu[k]

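            # Second derivatives of the cross-gradient term: the four blocks
            # below are d^2/d(grad m_l)d(grad m_k) of
            # f/2*(|grad m_l|^2*|grad m_k|^2 - (grad m_l . grad m_k)^2),
            # written with outer products of the gradients.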
            if numLS > 1:
                for k in xrange(numLS):
                    grad_m_k=grad_m[k,:]
                    l2_grad_m_k = length(grad_m_k)**2
                    o_kk=outer(grad_m_k, grad_m_k)
                    for l in xrange(k):
                        grad_m_l=grad_m[l,:]
                        l2_grad_m_l = length(grad_m_l)**2
                        i_lk = inner(grad_m_l, grad_m_k)
                        o_lk = outer(grad_m_l, grad_m_k)
                        o_kl = outer(grad_m_k, grad_m_l)
                        o_ll = outer(grad_m_l, grad_m_l)
                        f = mu_c[l,k] * self.__wc[l,k]

                        A[l,:,l,:] += f * ( l2_grad_m_k * kronecker(DIM) - o_kk )
                        A[l,:,k,:] += f * ( 2 * o_lk - o_kl - i_lk * kronecker(DIM) )
                        A[k,:,l,:] += f * ( 2 * o_kl - o_lk - i_lk * kronecker(DIM) )
                        A[k,:,k,:] += f * ( l2_grad_m_l * kronecker(DIM) - o_ll )
            self.getPDE().setValue(A=A)
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(X=r[1])
        #print "X only: ",self.getPDE().getSolution()
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(Y=r[0])
        #print "Y only: ",self.getPDE().getSolution()

        self.getPDE().resetRightHandSideCoefficients()
        self.getPDE().setValue(X=r[1], Y=r[0])
        return self.getPDE().getSolution()

    def updateHessian(self):
        """
        notify the class to recalculate the Hessian operator
        """
        if not self.__useDiagonalHessianApproximation:
            self._update_Hessian=True

    def getNorm(self, m):
        """
        returns the norm of ``m``, i.e. the root mean square of ``m`` over
        the domain.

        :param m: level set function
        :type m: `Data`
        :rtype: ``float``
        """
        return sqrt(integrate(length(m)**2)/self.__vol_d)
