#
##############################################################################

"""Cost functions for inversions with one or more forward models"""

__copyright__="""Copyright (c) 2003-2013 by University of Queensland
http://www.uq.edu.au"""

__all__ = [ 'InversionCostFunction']

from .costfunctions import MeteredCostFunction
from .mappings import Mapping
from .forwardmodels import ForwardModel
from esys.escript.pdetools import ArithmeticTuple
from esys.escript import Data
import numpy as np


class InversionCostFunction(MeteredCostFunction):
    """
    Class to define cost function *J(m)* for inversion with one or more
    forward models based on a multi-valued level set function *m*:

    *J(m) = J_reg(m) + sum_f mu_f * J_f(p)*

    where *J_reg(m)* is the regularization and cross gradient component of the
    cost function applied to a level set function *m*, *J_f(p)* are the data
    defect cost functions involving a physical forward model using the
    physical parameter(s) *p*, and *mu_f* is the trade-off factor for model *f*.

    A forward model depends on a set of physical parameters *p* which are
    constructed from components of the level set function *m* via mappings.

    Example 1 (single forward model):
         m=Mapping()
         f=ForwardModel()
         J=InversionCostFunction(Regularization(), m, f)

    Example 2 (two forward models on a single-valued level set function):
         m0=Mapping()
         m1=Mapping()
         f0=ForwardModel()
         f1=ForwardModel()

         J=InversionCostFunction(Regularization(), mappings=[m0, m1], forward_models=[(f0, 0), (f1, 1)])

    Example 3 (two forward models on a 2-valued level set function):
         m0=Mapping()
         m1=Mapping()
         f0=ForwardModel()
         f1=ForwardModel()

         J=InversionCostFunction(Regularization(numLevelSets=2), mappings=[(m0, 0), (m1, 0)], forward_models=[(f0, 0), (f1, 1)])

    :cvar provides_inverse_Hessian_approximation: if true the class provides an
          approximative inverse of the Hessian operator.
    """
    provides_inverse_Hessian_approximation=True

    def __init__(self, regularization, mappings, forward_models):
        """
        constructor for the cost function.
        Stores the supplied object references and sets default weights.

        :param regularization: the regularization part of the cost function
        """
        super(InversionCostFunction, self).__init__()
        self.regularization=regularization

        if isinstance(mappings, Mapping):
            self.mappings = [mappings]
        else:
            self.mappings = mappings

        if isinstance(forward_models, ForwardModel):
            self.forward_models = [forward_models]
        else:
            self.forward_models = forward_models

        trafo = regularization.getCoordinateTransformation()
        for f in self.forward_models:
            F = f if isinstance(f, ForwardModel) else f[0]
            if not F.getCoordinateTransformation() == trafo:
                raise ValueError("Coordinate transformation for regularization and forward model don't match.")

        self.numMappings=len(self.mappings)
        self.numModels=len(self.forward_models)
        self.numLevelSets = self.regularization.getNumLevelSets()
        self.__num_tradeoff_factors = self.regularization.getNumTradeOffFactors() + self.numModels
        self.setTradeOffFactorsModels()

    def getDomain(self):
        """
        returns the domain of the cost function

        :rtype: `Domain`
        """
        return self.regularization.getDomain()

    def getNumTradeOffFactors(self):
        """
        returns the number of trade-off factors being used including the
        trade-off factors used in the regularization component.

        :rtype: ``int``
        """
        return self.__num_tradeoff_factors

    def getForwardModel(self, idx=None):
        """
        returns the idx-th forward model.

        :param idx: model index. If the cost function contains only one model,
                    `idx` can be omitted.
        :type idx: ``int``
        :rtype: `ForwardModel`
        """
        if idx is None: idx=0
        f=self.forward_models[idx]
        if isinstance(f, ForwardModel):
            F=f
        else:
            F=f[0]
        return F
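
    # Usage sketch (not part of the original API docs): assuming `J` is an
    # InversionCostFunction built with two forward models as in Example 2 of
    # the class docstring,
    #     f0 = J.getForwardModel()    # defaults to index 0
    #     f1 = J.getForwardModel(1)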

    def getRegularization(self):
        """
        returns the regularization

        :rtype: `Regularization`
        """
        return self.regularization

    def setTradeOffFactorsModels(self, mu=None):
        """
        sets the trade-off factors for the forward model components.

        :param mu: list of the trade-off factors. If not present ones are used.
        :type mu: ``float`` in case of a single model or a ``list`` of
                  ``float`` with the length of the number of models.
        """
        if mu is None:
            self.mu_model=np.ones((self.numModels, ))
        else:
            if self.numModels > 1:
                mu=np.asarray(mu)
                if min(mu) > 0:
                    self.mu_model = mu
                else:
                    raise ValueError("All values for trade-off factor mu must be positive.")
            else:
                mu=float(mu)
                if mu > 0:
                    self.mu_model = [mu, ]
                else:
                    raise ValueError("Trade-off factor must be positive.")
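
    # Usage sketch (illustrative values only): assuming `J` is an
    # InversionCostFunction,
    #     J.setTradeOffFactorsModels(10.)          # single forward model
    #     J.setTradeOffFactorsModels([10., 0.5])   # two forward models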

    def getTradeOffFactorsModels(self):
        """
        returns the trade-off factors for the forward models

        :rtype: ``float`` or ``list`` of ``float``
        """
        if self.numModels > 1:
            return self.mu_model
        else:
            return self.mu_model[0]

    def setTradeOffFactorsRegularization(self, mu=None, mu_c=None):
        """
        sets the trade-off factors for the regularization component of the
        cost function, see `Regularization` for details.

        :param mu: trade-off factors for the level-set variation part
        :param mu_c: trade-off factors for the cross gradient variation part
        """
        self.regularization.setTradeOffFactorsForVariation(mu)
        self.regularization.setTradeOffFactorsForCrossGradient(mu_c)
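
    # Usage sketch (illustrative values; assumes a 2-valued level set function
    # so that a cross-gradient term exists):
    #     J.setTradeOffFactorsRegularization(mu=[1., 1.], mu_c=0.5)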

    def setTradeOffFactors(self, mu=None):
        """
        sets the trade-off factors for the forward model and regularization
        terms.

        :param mu: list of trade-off factors.
        :type mu: ``list`` of ``float``
        """
        if mu is None:
            mu=np.ones((self.__num_tradeoff_factors,))
        self.setTradeOffFactorsModels(mu[:self.numModels])
        self.regularization.setTradeOffFactors(mu[self.numModels:])
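
    # Ordering sketch: the first numModels entries of mu are the forward model
    # factors, the rest are passed on to the regularization. For two models and
    # two regularization factors (hypothetical values):
    #     J.setTradeOffFactors([10., 0.5, 1., 1.])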

    def getTradeOffFactors(self):
        """
        returns a list of the trade-off factors.

        :rtype: ``list`` of ``float``
        """
        mu1=self.getTradeOffFactorsModels()
        if self.numModels < 2:
            mu1=[mu1]
        mu2=self.regularization.getTradeOffFactors()
        return [ m for m in mu1 ] + [ m for m in mu2 ]

    def createLevelSetFunction(self, *props):
        """
        returns an instance of an object used to represent a level set function
        initialized with zeros. Components can be overwritten by physical
        properties `props`. If present, entries must correspond to the
        `mappings` arguments in the constructor. Use ``None`` for properties
        for which no value is given.
        """
        m=self.regularization.getPDE().createSolution()
        if len(props) > 0:
            for i in range(self.numMappings):
                if props[i]:
                    mm=self.mappings[i]
                    if isinstance(mm, Mapping):
                        m=mm.getInverse(props[i])
                    elif len(mm) == 1:
                        m=mm[0].getInverse(props[i])
                    else:
                        m[mm[1]]=mm[0].getInverse(props[i])
        return m
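
    # Usage sketch (assumes `J` uses two mappings and `rho` is a physical
    # property matching the second mapping):
    #     m = J.createLevelSetFunction(None, rho)
    # Components for which None is passed keep their zero initial value.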

    def getProperties(self, m, return_list=False):
        """
        returns a list of the physical properties from a given level set
        function *m* using the mappings of the cost function.

        :param m: level set function
        :type m: `Data`
        :param return_list: if ``True`` a list is returned.
        :type return_list: ``bool``
        :rtype: ``list`` of `Data`
        """
        props=[]
        for i in range(self.numMappings):
            mm=self.mappings[i]
            if isinstance(mm, Mapping):
                p=mm.getValue(m)
            elif len(mm) == 1:
                p=mm[0].getValue(m)
            else:
                p=mm[0].getValue(m[mm[1]])
            props.append(p)
        if self.numMappings > 1 or return_list:
            return props
        else:
            return props[0]
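
    # Usage sketch (assumes a single mapping; `m` is a level set function,
    # e.g. from createLevelSetFunction()):
    #     rho = J.getProperties(m)                       # single property
    #     props = J.getProperties(m, return_list=True)   # always a list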

    def _getDualProduct(self, x, r):
        """
        Returns the dual product, see `Regularization.getDualProduct`

        :type x: `Data`
        :type r: `ArithmeticTuple`
        :rtype: ``float``
        """
        return self.regularization.getDualProduct(x, r)

    def _getArguments(self, m):
        """
        returns precalculated values that are shared in the calculation of
        *J(m)* and *grad J(m)*. In this implementation returns a tuple with the
        mapped value of ``m``, the arguments from the forward model and the
        arguments from the regularization.

        :param m: current approximation of the level set function
        :type m: `Data`
        :return: tuple of values of the parameters, pre-computed values for
                 the forward model and pre-computed values for the
                 regularization
        :rtype: ``tuple``
        """
        args_reg=self.regularization.getArguments(m)
        # cache for physical parameters:
        props=self.getProperties(m, return_list=True)
        args_f=[]
        for i in range(self.numModels):
            f=self.forward_models[i]
            if isinstance(f, ForwardModel):
                aa=f.getArguments(props[0])
            elif len(f) == 1:
                aa=f[0].getArguments(props[0])
            else:
                idx = f[1]
                f=f[0]
                if isinstance(idx, int):
                    aa=f.getArguments(props[idx])
                else:
                    pp=tuple( [ props[j] for j in idx ] )
                    aa=f.getArguments(*pp)
            args_f.append(aa)

        return props, args_f, args_reg

    def _getValue(self, m, *args):
        """
        returns the value *J(m)* of the cost function at *m*.
        If the pre-computed values are not supplied `getArguments()` is called.

        :param m: current approximation of the level set function
        :type m: `Data`
        :param args: tuple of values of the parameters, pre-computed values
                     for the forward model and pre-computed values for the
                     regularization
        :rtype: ``float``
        """
        if len(args)==0:
            args=self.getArguments(m)

        props=args[0]
        args_f=args[1]
        args_reg=args[2]

        J = self.regularization.getValue(m, *args_reg)
        self.logger.debug("J_reg = %e"%J)

        for i in range(self.numModels):
            f=self.forward_models[i]
            if isinstance(f, ForwardModel):
                J_f = f.getValue(props[0], *args_f[i])
            elif len(f) == 1:
                J_f = f[0].getValue(props[0], *args_f[i])
            else:
                idx = f[1]
                f=f[0]
                if isinstance(idx, int):
                    J_f = f.getValue(props[idx], *args_f[i])
                else:
                    args=tuple( [ props[j] for j in idx ] + args_f[i])
                    J_f = f.getValue(*args)
            self.logger.debug("J_f[%d] = %e"%(i, J_f))
            self.logger.debug("mu_model[%d] = %e"%(i, self.mu_model[i]))
            J += self.mu_model[i] * J_f

        return J
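
    # In formula form the value computed above is
    #     J(m) = J_reg(m) + sum_i mu_model[i] * J_f[i](p)
    # with p obtained from m via the mappings. For example, with
    # mu_model=[10., 0.5], J_reg=2.0, J_f[0]=0.1 and J_f[1]=4.0 this gives
    # J = 2.0 + 10.*0.1 + 0.5*4.0 = 5.0 (illustrative numbers only).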

    def getComponentValues(self, m, *args):
        return self._getComponentValues(m, *args)

    def _getComponentValues(self, m, *args):
        """
        returns the values of the individual cost functions that make up *J(m)*
        using the pre-calculated values for *m*.

        :param m: current approximation of the level set function
        :type m: `Data`
        :rtype: ``list`` of ``float``
        """
        if len(args)==0:
            args=self.getArguments(m)

        props=args[0]
        args_f=args[1]
        args_reg=args[2]

        J_reg = self.regularization.getValue(m, *args_reg)
        result = [J_reg]

        for i in range(self.numModels):
            f=self.forward_models[i]
            if isinstance(f, ForwardModel):
                J_f = f.getValue(props[0], *args_f[i])
            elif len(f) == 1:
                J_f = f[0].getValue(props[0], *args_f[i])
            else:
                idx = f[1]
                f=f[0]
                if isinstance(idx, int):
                    J_f = f.getValue(props[idx], *args_f[i])
                else:
                    args=tuple( [ props[j] for j in idx ] + args_f[i])
                    J_f = f.getValue(*args)
            self.logger.debug("J_f[%d] = %e"%(i, J_f))
            self.logger.debug("mu_model[%d] = %e"%(i, self.mu_model[i]))

            result += [J_f]   # note: returned without the trade-off factor mu_model[i]

        return result

    def _getGradient(self, m, *args):
        """
        returns the gradient of the cost function at *m*.
        If the pre-computed values are not supplied `getArguments()` is called.

        :param m: current approximation of the level set function
        :type m: `Data`
        :param args: tuple of values of the parameters, pre-computed values
                     for the forward model and pre-computed values for the
                     regularization
        :rtype: `ArithmeticTuple`
        """
        if len(args)==0:
            args = self.getArguments(m)

        props=args[0]
        args_f=args[1]
        args_reg=args[2]

        g_J = self.regularization.getGradient(m, *args_reg)
        p_diffs=[]
        for i in range(self.numMappings):
            mm=self.mappings[i]
            if isinstance(mm, Mapping):
                dpdm = mm.getDerivative(m)
            elif len(mm) == 1:
                dpdm = mm[0].getDerivative(m)
            else:
                dpdm = mm[0].getDerivative(m[mm[1]])
            p_diffs.append(dpdm)

        Y=g_J[0]
        for i in range(self.numModels):
            mu=self.mu_model[i]
            f=self.forward_models[i]
            if isinstance(f, ForwardModel):
                Ys = f.getGradient(props[0], *args_f[i]) * p_diffs[0] * mu
                if self.numLevelSets == 1:
                    Y += Ys
                else:
                    Y[0] += Ys
            elif len(f) == 1:
                Ys = f[0].getGradient(props[0], *args_f[i]) * p_diffs[0] * mu
                if self.numLevelSets == 1:
                    Y += Ys
                else:
                    Y[0] += Ys
            else:
                idx = f[1]
                f = f[0]
                if isinstance(idx, int):
                    Ys = f.getGradient(props[idx], *args_f[i]) * p_diffs[idx] * mu
                    if self.numLevelSets == 1:
                        if idx == 0:
                            Y += Ys
                        else:
                            raise IndexError("Illegal mapping index.")
                    else:
                        Y[idx] += Ys
                else:
                    args = tuple( [ props[j] for j in idx ] + args_f[i])
                    Ys = f.getGradient(*args)
                    for ii in range(len(idx)):
                        Y[idx[ii]] += Ys[ii] * p_diffs[idx[ii]] * mu

        return g_J
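
    # Note on the gradient assembly above: for a forward model acting on the
    # physical parameter p_k = M_k(m), the chain rule gives the contribution
    #     mu_f * dJ_f/dp_k * dM_k/dm
    # which is why each model gradient is multiplied by the corresponding
    # p_diffs entry and trade-off factor before being added to Y = g_J[0].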

    def _getInverseHessianApproximation(self, m, r, *args):
        """
        returns an approximative evaluation *p* of the inverse of the Hessian
        operator of the cost function for a given gradient type *r* at a
        given location *m*: *H(m) p = r*

        :param m: level set approximation where to calculate Hessian inverse
        :type m: `Data`
        :param r: a given gradient
        :type r: `ArithmeticTuple`
        :param args: tuple of values of the parameters, pre-computed values
                     for the forward model and pre-computed values for the
                     regularization
        :rtype: `Data`
        :note: in the current implementation only the regularization term is
               considered in the inverse Hessian approximation.
        """
        m=self.regularization.getInverseHessianApproximation(m, r, *args[2])
        return m

    def updateHessian(self):
        """
        notifies the class that the Hessian operator needs to be updated.
        """
        self.regularization.updateHessian()

    def _getNorm(self, m):
        """
        returns the norm of ``m``

        :param m: level set function
        :type m: `Data`
        :rtype: ``float``
        """
        return self.regularization.getNorm(m)