/[escript]/trunk/downunder/py_src/regularizations.py

Contents of /trunk/downunder/py_src/regularizations.py



Revision 4154
Tue Jan 22 09:30:23 2013 UTC by jfenwick
File MIME type: text/x-python
File size: 20864 byte(s)
Round 1 of copyright fixes
##############################################################################
#
# Copyright (c) 2003-2013 by University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development since 2012 by School of Earth Sciences
#
##############################################################################

__copyright__="""Copyright (c) 2003-2013 by University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="https://launchpad.net/escript-finley"

__all__ = ['Regularization']

from costfunctions import CostFunction

import numpy as np
from esys.escript import ReducedFunction, outer, Data, Scalar, grad, inner, integrate, interpolate, kronecker, boundingBoxEdgeLengths, vol, sqrt, length
from esys.escript.linearPDEs import LinearPDE, IllegalCoefficientValue
from esys.escript.pdetools import ArithmeticTuple

class Regularization(CostFunction):
    """
    The regularization term for the level set function ``m`` within the cost
    function J for an inversion:

    *J(m)=1/2 * sum_k integrate( mu[k] * ( w0[k] * m_k**2 + w1[k,i] * m_{k,i}**2 ) + sum_{l<k} mu_c[l,k] * wc[l,k] * | grad(m_k) x grad(m_l) |^2 )*

    where w0[k], w1[k,i] and wc[l,k] are non-negative weighting factors and
    mu[k] and mu_c[l,k] are trade-off factors which may be altered
    during the inversion. The weighting factors are normalized such that their
    integrals over the domain are constant:

    *integrate(w0[k] + inner(w1[k,:],1/L[:]**2)) = scale[k] * volume(domain)*
    *integrate(wc[l,k]*1/L**4) = scale_c[l,k] * volume(domain)*

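    A minimal usage sketch (``dom`` is assumed to be an existing escript
    ``Domain``; the constant weight and trade-off factor are chosen for
    illustration only)::

        from esys.escript import Function, Vector

        w1 = Vector(1., Function(dom))    # gradient weights of shape (DIM,)
        reg = Regularization(dom, numLevelSets=1, w1=w1, tol=1e-8)
        reg.setTradeOffFactors([10.])     # single trade-off factor mu[0]
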
    """
    def __init__(self, domain, numLevelSets=1,
                 w0=None, w1=None, wc=None,
                 location_of_set_m=Data(),
                 useDiagonalHessianApproximation=False, tol=1e-8,
                 scale=None, scale_c=None):
        """
        initialization.

        :param domain: domain
        :type domain: `Domain`
        :param numLevelSets: number of level sets
        :type numLevelSets: ``int``
        :param w0: weighting factor for the m**2 term. If not set, zero is assumed.
        :type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param w1: weighting factor for the grad(m_i) terms. If not set, zero is assumed.
        :type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
                  (``numLevelSets`` , DIM) if ``numLevelSets`` > 1
        :param wc: weighting factor for the cross-gradient terms. If not set,
                   zero is assumed. Used for the case ``numLevelSets`` > 1
                   only. Only values ``wc[l,k]`` in the lower triangle (l<k)
                   are used.
        :type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
        :param location_of_set_m: marks the location of zero values of the level
                   set function ``m`` by a positive entry.
        :type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
                   object of shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param useDiagonalHessianApproximation: if True, cross-gradient terms
                   between level set components are ignored when calculating
                   approximations of the inverse of the Hessian operator.
                   This can speed up the calculation of the inverse but may
                   lead to an increase of the number of iteration steps in the
                   inversion.
        :type useDiagonalHessianApproximation: ``bool``
        :param tol: tolerance when solving the PDE for the inverse of the
                    Hessian operator
        :type tol: positive ``float``
        :param scale: weighting factor for level set function variation terms.
                    If not set, one is used.
        :type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
                    (``numLevelSets`` ,) if ``numLevelSets`` > 1
        :param scale_c: scale for the cross-gradient terms. If not set,
                    one is assumed. Used for the case ``numLevelSets`` > 1
                    only. Only values ``scale_c[l,k]`` in the lower triangle (l<k)
                    are used.
        :type scale_c: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
        """
        if w0 is None and w1 is None:
            raise ValueError("Values for w0 or for w1 must be given.")
        if wc is None and numLevelSets>1:
            raise ValueError("Values for wc must be given.")

        self.__domain=domain
        DIM=self.__domain.getDim()
        self.__numLevelSets=numLevelSets

        self.__pde=LinearPDE(self.__domain, numEquations=self.__numLevelSets)
        self.__pde.getSolverOptions().setTolerance(tol)
        self.__pde.setSymmetryOn()
        try:
            self.__pde.setValue(q=location_of_set_m)
        except IllegalCoefficientValue:
            raise ValueError("Unable to set location of fixed level set function.")

        # =========== check the shape of the scales: =========================
        if scale is None:
            if numLevelSets == 1 :
                scale = 1.
            else:
                scale = np.ones((numLevelSets,))
        else:
            scale=np.asarray(scale)
            if numLevelSets == 1 :
                if scale.shape == ():
                    if not scale > 0 :
                        raise ValueError("Value for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale."%(scale.shape,))
            else:
                if scale.shape == (numLevelSets,):
                    if not min(scale) > 0:
                        raise ValueError("All values for scale must be positive.")
                else:
                    raise ValueError("Unexpected shape %s for scale."%(scale.shape,))

        if scale_c is None or numLevelSets < 2:
            scale_c = np.ones((numLevelSets,numLevelSets))
        else:
            scale_c=np.asarray(scale_c)
            if scale_c.shape == (numLevelSets,numLevelSets):
                if not all( [ scale_c[l,k] > 0. for k in xrange(1,numLevelSets) for l in xrange(k) ] ):
                    raise ValueError("All values in the lower triangle of scale_c must be positive.")
            else:
                raise ValueError("Unexpected shape %s for scale_c."%(scale_c.shape,))
        # ===== check the shape of the weights: ==============================
        if w0 is not None:
            w0 = interpolate(w0,self.__pde.getFunctionSpaceForCoefficient('D'))
            s0=w0.getShape()
            if numLevelSets == 1 :
                if not s0 == () :
                    raise ValueError("Unexpected shape %s for weight w0."%(s0,))
            else:
                if not s0 == (numLevelSets,):
                    raise ValueError("Unexpected shape %s for weight w0."%(s0,))
        if w1 is not None:
            w1 = interpolate(w1,self.__pde.getFunctionSpaceForCoefficient('A'))
            s1=w1.getShape()
            if numLevelSets == 1 :
                if not s1 == (DIM,) :
                    raise ValueError("Unexpected shape %s for weight w1."%(s1,))
            else:
                if not s1 == (numLevelSets,DIM):
                    raise ValueError("Unexpected shape %s for weight w1."%(s1,))
        if numLevelSets == 1 :
            wc=None
        else:
            wc = interpolate(wc,self.__pde.getFunctionSpaceForCoefficient('A'))
            sc=wc.getShape()
            if not sc == (numLevelSets, numLevelSets):
                raise ValueError("Unexpected shape %s for weight wc."%(sc,))
        # ============= now we rescale weights: ==============================
        L2s=np.asarray(boundingBoxEdgeLengths(domain))**2
        L4=1/np.sum(1/L2s)**2
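        # L2s[i] is the squared edge length of the domain's bounding box in
        # direction i; L4 = 1/(sum_i 1/L2s[i])**2 is the length**4 scale used
        # below to normalize the cross-gradient weights wc.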
        if numLevelSets == 1 :
            A=0
            if w0 is not None:
                A = integrate(w0)
            if w1 is not None:
                A += integrate(inner(w1, 1/L2s))
            if A > 0:
                f = scale/A
                if w0 is not None:
                    w0*=f
                if w1 is not None:
                    w1*=f
            else:
                raise ValueError("Non-positive weighting factor detected.")
        else:

            for k in xrange(numLevelSets):
                A=0
                if w0 is not None:
                    A = integrate(w0[k])
                if w1 is not None:
                    A += integrate(inner(w1[k,:], 1/L2s))
                if A > 0:
                    f = scale[k]/A
                    if w0 is not None:
                        w0[k]*=f
                    if w1 is not None:
                        w1[k,:]*=f
                else:
                    raise ValueError("Non-positive weighting factor for level set component %d detected."%k)

                # and now the cross-gradient:
                if wc is not None:
                    for l in xrange(k):
                        A = integrate(wc[l,k])/L4
                        if A > 0:
                            f = scale_c[l,k]/A
                            wc[l,k]*=f
                        #else:
                        #    raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected."%(l,k))

        self.__w0=w0
        self.__w1=w1
        self.__wc=wc

        self.__pde_is_set=False
        if self.__numLevelSets > 1:
            self.__useDiagonalHessianApproximation=useDiagonalHessianApproximation
        else:
            self.__useDiagonalHessianApproximation=True
        self._update_Hessian=True

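        # one trade-off factor per level set function plus one per
        # cross-gradient pair (l<k): numLevelSets + numLevelSets*(numLevelSets-1)/2 in total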
        self.__num_tradeoff_factors=numLevelSets+((numLevelSets-1)*numLevelSets)/2
        self.setTradeOffFactors()
        self.__vol_d=vol(self.__domain)

    def getDomain(self):
        """
        returns the domain of the regularization term

        :rtype: ``Domain``
        """
        return self.__domain

    def getNumLevelSets(self):
        """
        returns the number of level set functions

        :rtype: ``int``
        """
        return self.__numLevelSets

    def getPDE(self):
        """
        returns the linear PDE to be solved for the inverse of the Hessian operator

        :rtype: `LinearPDE`
        """
        return self.__pde

    def getDualProduct(self, m, r):
        """
        returns the dual product of a gradient represented by X=r[1] and Y=r[0]
        with a level set function m:

        *integrate(Y_i*m_i + X_ij*m_{i,j})*

        :type m: `Data`
        :type r: `ArithmeticTuple`
        :rtype: ``float``
        """
        A=0
        if not r[0].isEmpty(): A+=integrate(inner(r[0], m))
        if not r[1].isEmpty(): A+=integrate(inner(r[1], grad(m)))
        return A

    def getNumTradeOffFactors(self):
        """
        returns the number of trade-off factors being used.

        :rtype: ``int``
        """
        return self.__num_tradeoff_factors

    def setTradeOffFactors(self, mu=None):
        """
        sets the trade-off factors for the level-set variation and the
        cross-gradient terms.

        :param mu: new values for the trade-off factors where values mu[:numLevelSets] are the
                   trade-off factors for the level-set variation and the remaining values for
                   the cross-gradient part with mu_c[l,k]=mu[numLevelSets+l+((k-1)*k)/2] (l<k).
                   If no values for mu are given, ones are used. Values must be positive.
        :type mu: ``list`` of ``float`` or ``numpy.array``
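
        For example, for ``numLevelSets`` =3 the ordering of ``mu`` is
        [mu[0], mu[1], mu[2], mu_c[0,1], mu_c[0,2], mu_c[1,2]], i.e.
        mu_c[0,1]=mu[3], mu_c[0,2]=mu[4] and mu_c[1,2]=mu[5].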
        """
        numLS=self.getNumLevelSets()
        numTF=self.getNumTradeOffFactors()
        if mu is None:
            mu = np.ones((numTF,))
        else:
            mu = np.asarray(mu)

        if mu.shape == (numTF,):
            self.setTradeOffFactorsForVariation(mu[:numLS])
            mu_c2=np.zeros((numLS,numLS))
            for k in xrange(numLS):
                for l in xrange(k):
                    mu_c2[l,k] = mu[numLS+l+((k-1)*k)/2]
            self.setTradeOffFactorsForCrossGradient(mu_c2)
        elif mu.shape == () and numLS == 1:
            self.setTradeOffFactorsForVariation(mu)
        else:
            raise ValueError("Unexpected shape %s for mu."%(mu.shape,))

    def setTradeOffFactorsForVariation(self, mu=None):
        """
        sets the trade-off factors for the level-set variation part.

        :param mu: new values for the trade-off factors. Values must be positive.
        :type mu: ``float``, ``list`` of ``float`` or ``numpy.array``
        """
        numLS=self.getNumLevelSets()
        if mu is None:
            if numLS == 1:
                mu = 1.
            else:
                mu = np.ones((numLS,))

        mu=np.asarray(mu)
        if numLS == 1:
            if mu.shape == (1,): mu=mu[0]
            if mu.shape == ():
                if mu > 0:
                    self.__mu= mu
                    self._new_mu=True
                else:
                    raise ValueError("Value for trade-off factor must be positive.")
            else:
                raise ValueError("Unexpected shape %s for mu."%(mu.shape,))
        else:
            if mu.shape == (numLS,):
                if min(mu) > 0:
                    self.__mu= mu
                    self._new_mu=True
                else:
                    raise ValueError("All values for mu must be positive.")
            else:
                raise ValueError("Unexpected shape %s for trade-off factor."%(mu.shape,))

    def setTradeOffFactorsForCrossGradient(self, mu_c=None):
        """
        sets the trade-off factors for the cross-gradient terms.

        :param mu_c: new values for the trade-off factors for the cross-gradient
                     terms. Values must be positive. If no value is given, ones
                     are used. Only values mu_c[l,k] for l<k are used.
        :type mu_c: ``float``, ``list`` of ``float`` or ``numpy.array``
        """
        numLS=self.getNumLevelSets()
        if mu_c is None or numLS < 2:
            self.__mu_c = np.ones((numLS,numLS))
        elif isinstance(mu_c, float) or isinstance(mu_c, int):
            self.__mu_c = np.zeros((numLS,numLS))
            self.__mu_c[:,:]=mu_c
        else:
            mu_c=np.asarray(mu_c)
            if mu_c.shape == (numLS,numLS):
                if not all( [ mu_c[l,k] > 0. for k in xrange(1,numLS) for l in xrange(k) ] ):
                    raise ValueError("All trade-off factors in the lower triangle of mu_c must be positive.")
                else:
                    self.__mu_c = mu_c
                    self._new_mu=True
            else:
                raise ValueError("Unexpected shape %s for mu_c."%(mu_c.shape,))

    def getArguments(self, m):
        """
        returns the pre-computed values (here the gradient of ``m``) that are
        passed to `getValue` and `getGradient`.
        """
        return ( grad(m),)


    def getValue(self, m, grad_m):
        """
        returns the value of the cost function J for the level set function ``m``.

        :rtype: ``float``
        """
        mu=self.__mu
        mu_c=self.__mu_c
        DIM=self.getDomain().getDim()
        numLS=self.getNumLevelSets()

        A=0
        if self.__w0 is not None:
            A+=inner(integrate(m**2 * self.__w0), mu)

        if self.__w1 is not None:
            if numLS == 1:
                A+=integrate(inner(grad_m**2, self.__w1))*mu
            else:
                for k in xrange(numLS):
                    A+=mu[k]*integrate(inner(grad_m[k,:]**2,self.__w1[k,:]))

        if numLS > 1:
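            # cross-gradient contribution; |grad(m_k) x grad(m_l)|^2 is
            # evaluated via the identity |a x b|^2 = |a|^2*|b|^2 - (a.b)^2: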
            for k in xrange(numLS):
                gk=grad_m[k,:]
                len_gk=length(gk)
                for l in xrange(k):
                    gl=grad_m[l,:]
                    A+= mu_c[l,k] * integrate( self.__wc[l,k] * ( ( len_gk * length(gl) )**2 - inner(gk, gl)**2 ) )
        return A/2

    def getGradient(self, m, grad_m):
        """
        returns the gradient of the cost function J with respect to m.
        The function returns Y_k=dPsi/dm_k and X_kj=dPsi/dm_{k,j}.
        """

        mu=self.__mu
        mu_c=self.__mu_c
        DIM=self.getDomain().getDim()
        numLS=self.getNumLevelSets()

        print "WARNING: WRONG FUNCTION SPACE"

        grad_m=grad(m, ReducedFunction(m.getDomain()))
        if self.__w0 is not None:
            Y = m * self.__w0 * mu
        else:
            if numLS == 1:
                Y = Scalar(0, grad_m.getFunctionSpace())
            else:
                Y = Data(0, (numLS,), grad_m.getFunctionSpace())

        if self.__w1 is not None:
            if numLS == 1:
                X=grad_m*self.__w1*mu
            else:
                X=grad_m*self.__w1
                for k in xrange(numLS):
                    X[k,:]*=mu[k]
        else:
            X = Data(0, grad_m.getShape(), grad_m.getFunctionSpace())

        # cross-gradient terms:
        if numLS > 1:
            for k in xrange(numLS):
                grad_m_k=grad_m[k,:]
                l2_grad_m_k = length(grad_m_k)**2
                for l in xrange(k):
                    grad_m_l=grad_m[l,:]
                    l2_grad_m_l = length(grad_m_l)**2
                    grad_m_lk = inner(grad_m_l, grad_m_k)
                    f= mu_c[l,k]* self.__wc[l,k]
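                    # gradient of 1/2*wc[l,k]*( |grad(m_k)|^2*|grad(m_l)|^2 - (grad(m_k).grad(m_l))^2 )
                    # with respect to grad(m_l) is wc[l,k]*( |grad(m_k)|^2*grad(m_l) - (grad(m_k).grad(m_l))*grad(m_k) ),
                    # and analogously with respect to grad(m_k):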
                    X[l,:] += f * ( l2_grad_m_k * grad_m_l - grad_m_lk * grad_m_k )
                    X[k,:] += f * ( l2_grad_m_l * grad_m_k - grad_m_lk * grad_m_l )

        return ArithmeticTuple(Y, X)


    def getInverseHessianApproximation(self, m, r, grad_m):
        """
        returns an approximation of the inverse of the Hessian operator of the
        cost function applied to the given gradient ``r``, obtained by solving
        the regularization PDE.
        """
        if self._new_mu or self._update_Hessian:

            self._new_mu=False
            self._update_Hessian=False
            mu=self.__mu
            mu_c=self.__mu_c

            DIM=self.getDomain().getDim()
            numLS=self.getNumLevelSets()
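            # The approximate Hessian is set up as the operator of the PDE:
            # D carries the w0 (zeroth-order) terms and A the w1 and
            # cross-gradient (second-order) terms. Solving the PDE with the
            # right hand side Y=r[0], X=r[1] then applies the inverse of this
            # approximation to r.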
            if self.__w0 is not None:
                if numLS == 1:
                    D=self.__w0 * mu
                else:
                    D=self.getPDE().createCoefficient("D")
                    for k in xrange(numLS): D[k,k]=self.__w0[k] * mu[k]
                self.getPDE().setValue(D=D)

            A=self.getPDE().createCoefficient("A")
            if self.__w1 is not None:
                if numLS == 1:
                    for i in xrange(DIM): A[i,i]=self.__w1[i] * mu
                else:
                    for k in xrange(numLS):
                        for i in xrange(DIM): A[k,i,k,i]=self.__w1[k,i] * mu[k]

            if numLS > 1:
                for k in xrange(numLS):
                    grad_m_k=grad_m[k,:]
                    l2_grad_m_k = length(grad_m_k)**2
                    o_kk=outer(grad_m_k, grad_m_k)
                    for l in xrange(k):
                        grad_m_l=grad_m[l,:]
                        l2_grad_m_l = length(grad_m_l)**2
                        i_lk = inner(grad_m_l, grad_m_k)
                        o_lk = outer(grad_m_l, grad_m_k)
                        o_kl = outer(grad_m_k, grad_m_l)
                        o_ll=outer(grad_m_l, grad_m_l)
                        f= mu_c[l,k]* self.__wc[l,k]

                        A[l,:,l,:] += f * ( l2_grad_m_k * kronecker(DIM) - o_kk )
                        A[l,:,k,:] += f * ( 2 * o_lk - o_kl - i_lk * kronecker(DIM) )
                        A[k,:,l,:] += f * ( 2 * o_kl - o_lk - i_lk * kronecker(DIM) )
                        A[k,:,k,:] += f * ( l2_grad_m_l * kronecker(DIM) - o_ll )
            self.getPDE().setValue(A=A)
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(X=r[1])
        #print "X only: ",self.getPDE().getSolution()
        #self.getPDE().resetRightHandSideCoefficients()
        #self.getPDE().setValue(Y=r[0])
        #print "Y only: ",self.getPDE().getSolution()

        self.getPDE().resetRightHandSideCoefficients()
        self.getPDE().setValue(X=r[1], Y=r[0])
        return self.getPDE().getSolution()

    def updateHessian(self):
        """
        notify the class to recalculate the Hessian operator
        """
        if not self.__useDiagonalHessianApproximation:
            self._update_Hessian=True

    def getNorm(self, m):
        """
        returns the norm of ``m``

        :param m: level set function
        :type m: `Data`
        :rtype: ``float``
        """
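        # root mean square of m over the domain:
        # sqrt( integrate(length(m)**2) / vol(domain) )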
        return sqrt(integrate(length(m)**2)/self.__vol_d)
