/[escript]/trunk/doc/inversion/CostFunctions.tex
ViewVC logotype

Diff of /trunk/doc/inversion/CostFunctions.tex

Parent Directory Parent Directory | Revision Log Revision Log | View Patch Patch

revision 4141 by caltinay, Tue Jan 15 00:13:28 2013 UTC revision 4142 by gross, Tue Jan 15 09:06:06 2013 UTC
# Line 55  the second pair entry specifies the inde Line 55  the second pair entry specifies the inde
55  In this case the level set function has two components, where the density mapping uses the first component of the level set function  In this case the level set function has two components, where the density mapping uses the first component of the level set function
56  while the susceptibility mapping uses the second component.  while the susceptibility mapping uses the second component.
57    
58  The \class{InversionCostFunction} API is defined as follows:  \section{\class{InversionCostFunction} API}\label{chapter:ref:inversion cost function:api}
59    
60    The \class{InversionCostFunction} implements a \class{CostFunction} class used to run optimization solvers,
61    see section~\ref{chapter:ref:Minimization: costfunction class}. Its API is defined as follows:
62    
63  \begin{classdesc}{InversionCostFunction}{regularization, mappings, forward_models}  \begin{classdesc}{InversionCostFunction}{regularization, mappings, forward_models}
64  Constructor for the inversion cost function. \member{regularization} sets the regularization to be used, see Chapter~\ref{Chp:ref:regularization}.  Constructor for the inversion cost function. \member{regularization} sets the regularization to be used, see Chapter~\ref{Chp:ref:regularization}.
# Line 72  forward model can be assigned to \member Line 75  forward model can be assigned to \member
75    
76    
77  \begin{methoddesc}[InversionCostFunction]{getDomain}{}  \begin{methoddesc}[InversionCostFunction]{getDomain}{}
78          """  returns the \escript domain of the inversion, see~\cite{ESCRIPT}.
         returns the domain of the cost function  
         :rtype: 'Domain`  
         """  
         self.regularization.getDomain()  
79  \end{methoddesc}  \end{methoddesc}
80                    
81  \begin{methoddesc}[InversionCostFunction]{getNumTradeOffFactors}{}  \begin{methoddesc}[InversionCostFunction]{getNumTradeOffFactors}{}
82          """  returns the total number of trade-off factors. The count includes the trade-off factors $\mu^{data}_{f}$ for
83          returns the number of trade-off factors being used including the  the forward models and (hidden) trade-off factors in the regularization term, see definition~\ref{REF:EQU:DRIVE:10}.
         trade-off factors used in the regularization component.  
   
         :rtype: ``int``  
         """  
         return self.__num_tradeoff_factors  
 \end{methoddesc}  
   
  \begin{methoddesc}[InversionCostFunction]{getForwardModel}{idx=None}  
         """  
         returns the *idx*-th forward model.  
   
         :param idx: model index. If cost function contains one model only `idx`  
                     can be omitted.  
         :type idx: ``int``  
         """  
         if idx==None: idx=0  
         f=self.forward_models[idx]  
         if isinstance(f, ForwardModel):  
               F=f  
         else:  
               F=f[0]  
         return F  
 \end{methoddesc}  
           
 \begin{methoddesc}[InversionCostFunction]{getRegularization}{}  
         """  
         returns the regularization  
         """  
         return self.regularization  
84  \end{methoddesc}  \end{methoddesc}
85    
86            \begin{methoddesc}[InversionCostFunction]{getForwardModel}{\optional{idx=\None}}
87  \begin{methoddesc}[InversionCostFunction]{setTradeOffFactorsModels}{mu=None}  returns the forward model with index \member{idx}. If the cost function contains
88          """  on model only argument \member{idx} can be omitted.  
         sets the trade-off factors for the forward model components.  
           
         :param mu: list of the trade-off factors. If not present ones are used.  
         :type mu: ``float`` in case of a single model or a ``list`` of ``float``  
                   with the length of the number of models.  
         """  
         if mu==None:  
             self.mu_model=np.ones((self.numModels, ))  
         else:  
             if self.numModels > 1:  
                mu=np.asarray(mu)  
                if min(mu) > 0:  
                   self.mu_model= mu  
                else:  
                   raise ValueError("All value for trade-off factor mu must be positive.")  
             else:  
               mu=float(mu)  
               if mu > 0:  
                   self.mu_model= [mu, ]  
               else:  
                   raise ValueError("Trade-off factor must be positive.")  
 \end{methoddesc}  
             
 \begin{methoddesc}[InversionCostFunction]{setTradeOffFactorsRegularization}{mu=None, mu_c=None}  
         """  
         sets the trade of factors for the regularization component of the cost  
         function, see `Regularization` for details.  
           
         :param mu: trade-off factors for the level-set variation part  
         :param mu_c: trade-off factors for the cross gradient variation part  
         """  
         self.regularization.setTradeOffFactorsForVariation(mu)  
         self.regularization.setTradeOffFactorsForCrossGradient(mu_c)  
89  \end{methoddesc}  \end{methoddesc}
90                    
91  \begin{methoddesc}[InversionCostFunction]{setTradeOffFactors}{mu=None}  \begin{methoddesc}[InversionCostFunction]{getRegularization}{}
92          """  returns the regularization component of the cost function, see \class{regularization} in Chapter~\ref{Chp:ref:regularization}.
         sets the trade-off factors for the forward model and regularization  
         terms.  
   
         :param mu: list of trade-off factors.  
         :type mu: ``list`` of ``float``  
         """  
         if mu is None:  
             mu=np.ones((self.__num_tradeoff_factors,))  
         self.setTradeOffFactorsModels(mu[:self.numModels])  
         self.regularization.setTradeOffFactors(mu[self.numModels:])  
 \end{methoddesc}  
   
 \begin{methoddesc}[InversionCostFunction]{createLevelSetFunction}{*props}  
         """  
         returns an instance of an object used to represent a level set function  
         initialized with zeros. Components can be overwritten by physical  
         properties 'props'. If present entries must correspond to the  
         `mappings` arguments in the constructor. Use `None` for properties for  
         which no value is given.  
         """  
         m=self.regularization.getPDE().createSolution()  
         if len(props) > 0:  
            for i in xrange(self.numMappings):  
               if props[i]:  
                   mm=self.mappings[i]  
                   if isinstance(mm, Mapping):  
                       m=mm.getInverse(props[i])  
                   elif len(mm) == 1:  
                       m=mm[0].getInverse(props[i])  
                   else:  
                       m[mm[1]]=mm[0].getInverse(props[i])  
         return m  
 \end{methoddesc}  
       
 \begin{methoddesc}[InversionCostFunction]{getProperties}{m, return_list=False}  
         """  
         returns a list of the physical properties from a given level set  
         function *m* using the mappings of the cost function.  
           
         :param m: level set function  
         :type m: `Data`  
         :param return_list: if True a list is returned.  
             def _:type return_list: `bool`  
         :rtype: `list` of `Data`  
         """  
         props=[]  
         for i in xrange(self.numMappings):  
            mm=self.mappings[i]  
            if isinstance(mm, Mapping):  
                p=mm.getValue(m)  
            elif len(mm) == 1:  
                p=mm[0].getValue(m)  
            else:  
                p=mm[0].getValue(m[mm[1]])  
            props.append(p)  
         if self.numMappings > 1 or return_list:  
            return props  
         else:  
            return props[0]  
 \end{methoddesc}  
             
 \begin{methoddesc}[InversionCostFunction]{getDualProduct}{x, r}  
         """  
         Returns the dual product, see `Regularization.getDualProduct`  
   
         :type x: `Data`  
         :type r: `ArithmeticTuple`              
         :rtype: `float`  
         """  
         return self.regularization.getDualProduct(x, r)  
93  \end{methoddesc}  \end{methoddesc}
94    
 \begin{methoddesc}[InversionCostFunction]{getArguments}{m}  
         """  
         returns pre-computed values that are shared in the calculation of  
         *J(m)* and *grad J(m)*. In this implementation returns a tuple with the  
         mapped value of ``m``, the arguments from the forward model and the  
         arguments from the regularization.  
95                    
96          :param m: current approximation of the level set function  \begin{methoddesc}[InversionCostFunction]{setTradeOffFactorsModels}{\optional{mu=\None}}
97          :type m: `Data`  sets the trade-off factors  $\mu^{data}_{f}$ for the forward model components.
98          :return: tuple of of values of the parameters, pre-computed values for the forward model and  If a single model is present \member{mu} must be a floating point number. Otherwise
99                   pre-computed values for the regularization  \member{mu} must be a list of floating point numbers. It is assumed that all
100          :rtype: `tuple`  numbers are positive. Default values for the trade-off factors is one.
         """  
         args_reg=self.regularization.getArguments(m)  
   
         props=self.getProperties(m, return_list=True)  
         args_f=[]  
         for i in xrange(self.numModels):  
            f=self.forward_models[i]  
            if isinstance(f, ForwardModel):  
               aa=f.getArguments(props[0])  
            elif len(f) == 1:  
               aa=f[0].getArguments(props[0])  
            else:  
               idx = f[1]  
               f=f[0]  
               if isinstance(idx, int):  
                  aa=f.getArguments(props[idx])  
               else:  
                  pp=tuple( [ props[i] for i in idx] )  
                  aa=f.getArguments(*pp)  
            args_f.append(aa)  
             
         return props, args_f, args_reg  
101  \end{methoddesc}  \end{methoddesc}
102    
103  \begin{methoddesc}[InversionCostFunction]{getValue}{m, *args}  \begin{methoddesc}[InversionCostFunction]{getTradeOffFactorsModels}{}
104          """  returns the values of the the trade-off factors  $\mu^{data}_{f}$ for the forward model components.
         Returns the value *J(m)* of the cost function at *m*.  
         If the pre-computed values are not supplied `getArguments()` is called.  
   
         :param m: current approximation of the level set function  
         :type m: `Data`  
         :param args: tuple of of values of the parameters, pre-computed values for the forward model and  
                  pre-computed values for the regularization  
         :rtype: `float`  
         """  
   
         if len(args)==0:  
             args=self.getArguments(m)  
           
         props=args[0]  
         args_f=args[1]  
         args_reg=args[2]  
           
         J = self.regularization.getValue(m, *args_reg)  
         print "J_reg = %e"%J  
                   
         for i in xrange(self.numModels):  
                   
            f=self.forward_models[i]  
            if isinstance(f, ForwardModel):  
               J_f = f.getValue(props[0],*args_f[i])  
            elif len(f) == 1:  
               J_f=f[0].getValue(props[0],*args_f[i])  
            else:  
               idx = f[1]  
               f=f[0]  
               if isinstance(idx, int):  
                  J_f = f.getValue(props[idx],*args_f[i])  
               else:  
                  args=tuple( [ props[j] for j in idx] + args_f[i])  
                  J_f = f.getValue(*args)  
            print "J_f[%d] = %e"%(i, J_f)  
            print "mu_model[%d] = %e"%(i, self.mu_model[i])  
            J += self.mu_model[i] * J_f  
             
         return   J  
105  \end{methoddesc}  \end{methoddesc}
106    
 \begin{methoddesc}[InversionCostFunction]{getGradient}{m, *args}  
         """  
         returns the gradient of the cost function  at *m*.  
         If the pre-computed values are not supplied `getArguments()` is called.  
   
         :param m: current approximation of the level set function  
         :type m: `Data`  
         :param args: tuple of of values of the parameters, pre-computed values for the forward model and  
                  pre-computed values for the regularization  
                   
         :rtype: `ArithmeticTuple`  
         """  
         if len(args)==0:  
             args = self.getArguments(m)  
           
         props=args[0]  
         args_f=args[1]  
         args_reg=args[2]  
           
         g_J = self.regularization.getGradient(m, *args_reg)  
         p_diffs=[]  
         for i in xrange(self.numMappings):  
            mm=self.mappings[i]  
            if isinstance(mm, Mapping):  
                dpdm = mm.getDerivative(m)  
            elif len(mm) == 1:  
                dpdm = mm[0].getDerivative(m)  
            else:  
                dpdm = mm[0].getDerivative(m[mm[1]])  
            p_diffs.append(dpdm)  
107                        
108          Y=g_J[0]    \begin{methoddesc}[InversionCostFunction]{setTradeOffFactorsRegularization}{\optional{mu=\None}, \optional{mu_c=\None}}
109          for i in xrange(self.numModels):  sets the trade-off factors for the regularization component of the cost function.
110             mu=self.mu_model[i]  \member{mu} defines the trade-off factors for the level-set variation part and
111             f=self.forward_models[i]  \member{mu_c} sets the trade-off factors for the cross-gradient variation part.
112             if isinstance(f, ForwardModel):  This method is a short-cut for calling \member{setTradeOffFactorsForVariation} and
113                Ys= f.getGradient(props[0],*args_f[i]) * p_diffs[0] * mu  \member{setTradeOffFactorsForCrossGradient} for the underlying regularization.
114                if self.numLevelSets == 1 :  Please see \class{Regularization} in Chapter~\ref{Chp:ref:regularization} for more details
115                   Y +=Ys  on the arguments \member{mu} and \member{mu_c}.
116                else:  \end{methoddesc}
117                    Y[0] +=Ys          
118             elif len(f) == 1:  \begin{methoddesc}[InversionCostFunction]{setTradeOffFactors}{\optional{mu=\None}}
119                Ys=f[0].getGradient(props[0],*args_f[i]) * p_diffs[0]  * mu  sets the trade-off factors for the forward model and regularization
120                if self.numLevelSets == 1 :  terms. \member{mu} is a list of positive floats. The length of the list
121                   Y +=Ys  is the total number of trade-off factors given by the method \method{getNumTradeOffFactors}. The
122                else:  first part of \member{mu} defines the trade-off factors  $\mu^{data}_{f}$ for the forward model components
123                    Y[0] +=Ys  while the remaining entries define the trade-off factors for the regularization components of the
124             else:  cost function. By default all values are set to one.
125                idx = f[1]  \end{methoddesc}
126                f=f[0]  
127                if isinstance(idx, int):  \begin{methoddesc}[InversionCostFunction]{getProperties}{m}
128                   Ys = f.getGradient(props[idx],*args_f[i]) * p_diffs[idx] * mu  returns the physical properties from a given level set function \member{m}
129                   if self.numLevelSets == 1 :  using the mappings of the cost function. The physical properties are
130                       if idx == 0:  returned in the order in which they are given in the \member{mappings} argument
131                           Y+=Ys  in the class constructor.
                      else:  
                          raise IndexError("Illegal mapping index.")  
                  else:  
                      Y[idx] += Ys  
               else:  
                  args=tuple( [ props[j] for j in idx] + args_f[i])  
                  Ys = f.getGradient(*args)  
                  for ii in xrange(len(idx)):  
                      Y[idx[ii]]+=Ys[ii]* p_diffs[idx[ii]]  * mu  
   
         return g_J  
132  \end{methoddesc}  \end{methoddesc}
133    
134    \begin{methoddesc}[InversionCostFunction]{createLevelSetFunction}{*props}
135  \begin{methoddesc}[InversionCostFunction]{getInverseHessianApproximation}{m, r, *args}  returns the level set function corresponding to set of given physical properties.
136          """  This method is the inverse of the \method{getProperties} method.
137          returns an approximative evaluation *p* of the inverse of the Hessian operator of the cost function  The arguments \member{props} define a tuple of values for the  physical properties
138          for a given gradient type *r* at a given location *m*: *H(m) p = r*  where the order needs to correspond to the order in which the physical property mappings
139    are given in the \member{mappings} argument
140          :param m: level set approximation where to calculate Hessian inverse  in the class constructor. If a value for a physical property
141          :type m: `Data`  is given as \None the corresponding component of the returned level set function is set to zero.
142          :param r: a given gradient  If no physical properties are given all components of the level set function are set to zero.
         :type r: `ArithmeticTuple`  
         :param args: tuple of of values of the parameters, pre-computed values for the forward model and  
                  pre-computed values for the regularization  
         :rtype: `Data`  
         :note: in the current implementation only the regularization term is  
                considered in the inverse Hessian approximation.  
   
         """  
         m=self.regularization.getInverseHessianApproximation(m, r, *args[2])  
         return m  
   
143  \end{methoddesc}  \end{methoddesc}
144                
145  \begin{methoddesc}[InversionCostFunction]{getNorm}{m}  \begin{methoddesc}[InversionCostFunction]{getNorm}{m}
146          """  returns the norm of a level set function \member{m} as a floating point number.
         returns the norm of ``m``  
   
         :param m: level set function  
         :type m: `Data`  
         :rtype: ``float``  
         """  
   
147  \end{methoddesc}  \end{methoddesc}
148    
149    \begin{methoddesc}[InversionCostFunction]{getArguments}{m}
150    returns pre-computed values for the evaluation of the
151    cost function and its gradient for a given value \member{m}
152    of the level set function. In essence the method collects
153    pre-computed values for the underlying regularization and forward models\footnote{Using pre-computed
154    values can significantly speed up the optimization process when the value
155    of the cost function and its gradient for the same level set function
156    are needed.}.
157    \end{methoddesc}
158    
159    \begin{methoddesc}[InversionCostFunction]{getValue}{m \optional{, *args}}
160    returns the value of the cost function for a given level set function \member{m}
161    and corresponding pre-computed values \member{args}. If no pre-computed values are present
162    \member{getArguments} is called.
163    \end{methoddesc}
164    
165    \begin{methoddesc}[InversionCostFunction]{getGradient}{m \optional{, *args}}
166    returns the gradient of the cost function  at level set function \member{m}
167    using the corresponding pre-computed values \member{args}. If no pre-computed values are present
168    \member{getArguments} is called. The gradient
169    is represented as a tuple $(Y,X)$ where in essence
170    $Y$ represents the derivative of the cost function kernel with respect to the
171    level set function and $X$ represents the derivative of the cost function kernel with respect to the gradient of the
172    level set function, see Section~\ref{chapter:ref:inversion cost function:gradient} for more details.
173    \end{methoddesc}
174          
175    \begin{methoddesc}[InversionCostFunction]{getDualProduct}{m, g}
176    returns the dual product of a level set function \member{m}
177    with a gradient \member{g}, see Section~\ref{chapter:ref:inversion cost function:gradient} for more details.
178    This method uses the dual product of the regularization.
179    \end{methoddesc}
180    
181    \begin{methoddesc}[InversionCostFunction]{getInverseHessianApproximation}{m, g \optional{, *args}}
182    returns an approximative evaluation of the inverse of the Hessian operator of the cost function
183    for a given gradient \member{g} at a given level set function \member{m}
184    using the corresponding pre-computed values \member{args}. If no pre-computed values are present
185    \member{getArguments} is called. In the current implementation
186    contributions to the Hessian operator from the forward models are ignored; only contributions
187    from the regularization and cross-gradient terms are used.
188    \end{methoddesc}
189            
190    
191    \section{Gradient calculation}\label{chapter:ref:inversion cost function:gradient}
192    In this section we briefly discuss the calculation of the gradient and the Hessian operator.
193    If $\nabla$ denotes the gradient operator (with respect to the level set function $m$)
194    the gradient of  $J$ is given as
195    \begin{equation}\label{REF:EQU:DRIVE:10b}
196    \nabla J(m) = \nabla J^{reg}(m) + \sum_{f} \mu^{data}_{f} \cdot \nabla J^{f}(p^f) \; .
197    \end{equation}
198    We first focus on the calculation of $\nabla J^{reg}$. In fact the
199    regularization cost function $J^{reg}$ is given through a cost function
200    kernel\index{cost function!kernel} $K^{reg}$ in the form
201    \begin{equation}\label{REF:EQU:INTRO 2a}
202    J^{reg}(m) = \int_{\Omega} K^{reg} \; dx
203    \end{equation}
204    where $K^{reg}$ is a given function of the
205    level set function $m_k$ and its spatial derivative $m_{k,i}$. If $n$ is an increment to the level set function
206    then the directional derivative of $J^{reg}$ in the direction of $n$ is given as
207    \begin{equation}\label{REF:EQU:INTRO 2a}
208    <n, \nabla J^{reg}(m)> = \int_{\Omega} \frac{ \partial K^{reg}}{\partial m_k} n_k + \frac{ \partial K^{reg}}{\partial m_{k,i}} n_{k,i} \; dx
209    \end{equation}
210    where $<.,.>$ denotes the dual product, see Chapter~\ref{chapter:ref:Minimization}. Consequently, the gradient $\nabla J^{reg}$
211    can be represented by a pair of values $Y$ and $X$
212    \begin{equation}\label{ref:EQU:CS:101}
213    \begin{array}{rcl}
214      Y_k & = & \displaystyle{\frac{\partial K^{reg}}{\partial m_k}} \\
215       X_{ki} & = & \displaystyle{\frac{\partial K^{reg}}{\partial m_{k,i}}}
216    \end{array}
217    \end{equation}
218    while the dual product $<.,.>$ of a level set increment $n$ and a gradient increment $g=(Y,X)$ is given as
219    \begin{equation}\label{REF:EQU:INTRO 2a}
220    <n,g> = \int_{\Omega} Y_k n_k + X_{ki} n_{k,i} \; dx
221    \end{equation}
222    We also need to provide (an approximation of) the value $p$ of the inverse of the Hessian operator $\nabla \nabla J$
223    for a given gradient increment $g=(Y,X)$. This means we need to (approximatively) solve the variational problem
224    \begin{equation}\label{REF:EQU:INTRO 2b}
225    <n,\nabla \nabla J p > = \int_{\Omega} Y_k n_k + X_{ki} n_{k,i} \; dx
226    \end{equation}
227    for all increments $n$ of the level set function. If we ignore contributions
228    from the forward models the left hand side takes the form
229    \begin{equation}\label{REF:EQU:INTRO 2c}
230    <n,\nabla \nabla J^{reg} p > = \int_{\Omega}
231    \displaystyle{\frac{\partial Y_k}{\partial m_l}} p_l n_k +
232    \displaystyle{\frac{\partial Y_k}{\partial m_{l,j}}} p_{l,j} n_k +
233    \displaystyle{\frac{\partial X_{ki}}{\partial m_l}} p_l n_{k,i} +
234    \displaystyle{\frac{\partial X_{ki}}{\partial m_{l,j}}} p_{l,j} n_{k,i}
235    \; dx
236    \end{equation}  We follow the concept as outlined in section~\ref{chapter:ref:inversion cost function:gradient}.
237    Notice that equation~\ref{REF:EQU:INTRO 2b} defines a system of linear PDEs
238    which is solved using the \escript \class{LinearPDE} class. In the \escript notation we need to provide
239    \begin{equation}\label{ref:EQU:REG:600}
240    \begin{array}{rcl}
241     A_{kilj} & = &  \displaystyle{\frac{\partial X_{ki}}{\partial m_{l,j}}}  \\
242     B_{kil} & = &  \displaystyle{\frac{\partial X_{ki}}{\partial m_l}}   \\
243     C_{klj} & = &  \displaystyle{\frac{\partial Y_k}{\partial m_{l,j}}}    \\
244      D_{kl} & = & \displaystyle{\frac{\partial Y_k}{\partial m_l}}    \\
245    \end{array}
246    \end{equation}
247    The calculation of the gradient of the forward model component is more complicated:
248    the data defect $J^{f}$ for forward model $f$ is expressed using a cost function kernel $K^{f}$
249    \begin{equation}\label{REF:EQU:INTRO 2b}
250    J^{f}(p^f) = \int_{\Omega} K^{f} \; dx
251    \end{equation}
252    In this case the cost function kernel $K^{f}$ is a function of the
253    physical parameter $p^f$, which again is a function of the level-set function,
254    and the state variable $u^f_{k}$ and its gradient $u^f_{k,i}$. For the sake of a simpler
255    presentation the upper index $f$ is dropped.
256    
257    The gradient $\nabla_{p} J$ of the  $J$ with respect to
258    the physical property $p$ is given as
259    \begin{equation}\label{REF:EQU:costfunction 100b}
260    <q, \nabla_{p} J(p)> =    \int_{\Omega}
261    \displaystyle{\frac{\partial K }{\partial u_k }  } \displaystyle{\frac{\partial u_k }{\partial q }  } +
262    \displaystyle{\frac{\partial K }{\partial u_{k,i} }  } \left( \displaystyle{\frac{\partial u_k }{\partial q }  } \right)_{,i}+
263    \displaystyle{\frac{\partial K }{\partial p }  }  q \; dx
264    \end{equation}
265    for any $q$ as an increment to the physical parameter $p$. If the change
266      of the state variable
267    $u_f$ for physical parameter $p$ in the direction of $q$ is denoted as
268    \begin{equation}\label{REF:EQU:costfunction 100c}
269    d_k =\displaystyle{\frac{\partial u_k }{\partial q }  }
270    \end{equation}
271    equation~\ref{REF:EQU:costfunction 100b} can be written as
272    \begin{equation}\label{REF:EQU:costfunction 100d}
273    <q, \nabla_{p} J(p)> =    \int_{\Omega}
274    \displaystyle{\frac{\partial K }{\partial u_k }  } d_k +
275    \displaystyle{\frac{\partial K }{\partial u_{k,i} }  } d_{k,i}+
276    \displaystyle{\frac{\partial K }{\partial p }  }  q \; dx
277    \end{equation}
278    The state variables are the solution of a PDE which in variational form is given as
279    \begin{equation}\label{REF:EQU:costfunction 100}
280    \int_{\Omega} F_k \cdot r_k +  G_{li} \cdot r_{k,i} \; dx = 0
281    \end{equation}
282    for all increments $r$ to the state $u$. The functions $F$ and $G$ are given and describe the physical
283    model. They depend on the state variable $u_{k}$ and its gradient $u_{k,i}$ and the physical parameter $p$. The change
284    $d_k$  of the state
285    $u_f$ for physical parameter $p$ in the direction of $q$ is given from the equation
286    \begin{equation}\label{REF:EQU:costfunction 100b}
287     \int_{\Omega}
288    \displaystyle{\frac{\partial F_k }{\partial u_l }  } d_l r_k +
289    \displaystyle{\frac{\partial F_k }{\partial u_{l,j}} } d_{l,j} r_k +
290    \displaystyle{\frac{\partial F_k }{\partial p} }q r_k +
291    \displaystyle{\frac{\partial G_{ki}}{\partial u_l} } d_l r_{k,i} +
292    \displaystyle{\frac{\partial G_{ki}}{\partial u_{l,j}} } d_{l,j} r_{k,i}+
293    \displaystyle{\frac{\partial G_{ki}}{\partial p} } q r_{k,i}  
294    \; dx = 0  
295    \end{equation}
296    to be fulfilled for all functions $r$. Now let $d^*_k$ be the solution of the
297    variational equation
298    \begin{equation}\label{REF:EQU:costfunction 100d}
299     \int_{\Omega}
300    \displaystyle{\frac{\partial F_k }{\partial u_l }  } h_l d^*_k +
301    \displaystyle{\frac{\partial F_k }{\partial u_{l,j}} } h_{l,j} d^*_k +
302    \displaystyle{\frac{\partial G_{ki}}{\partial u_l} } h_l d^*_{k,i} +
303    \displaystyle{\frac{\partial G_{ki}}{\partial u_{l,j}} } h_{l,j} d^*_{k,i}
304    \; dx
305    = \int_{\Omega}
306    \displaystyle{\frac{\partial K }{\partial u_k }  } h_k +
307    \displaystyle{\frac{\partial K }{\partial u_{k,i} }  } h_{k,i}  \; dx
308    \end{equation}
309    for all increments $h_k$ to the physical property $p$. This problem
310    is solved using the \escript \class{LinearPDE} class. In the \escript notation we need to provide
311    \begin{equation}\label{ref:EQU:REG:600}
312    \begin{array}{rcl}
313     A_{kilj} & = &  \displaystyle{\frac{\partial G_{lj}}{\partial u_{k,i}} }  \\
314     B_{kil} & = &  \displaystyle{\frac{\partial F_l }{\partial u_{k,i}} }   \\
315     C_{klj} & = & \displaystyle{\frac{\partial G_{lj}}{\partial u_k} }    \\
316      D_{kl} & = & \displaystyle{\frac{\partial F_l }{\partial u_k }  }   \\
317      Y_{k} & = & \displaystyle{\frac{\partial K }{\partial u_k }  }     \\
318      X_{ki} & = & \displaystyle{\frac{\partial K }{\partial u_{k,i} }  }    \\
319    \end{array}
320    \end{equation}
321    Notice that these coefficients are transposed to the coefficients used to solve for the
322    state variables in equation~\ref{REF:EQU:costfunction 100}.
323    
324    Setting $h_l=d_l$ in equation~\ref{REF:EQU:costfunction 100d} and
325    $r_k=d^*_k$ in equation~\ref{REF:EQU:costfunction 100b} one gets
326    \begin{equation}\label{ref:EQU:REG:601}
327    \int_{\Omega}
328    \displaystyle{\frac{\partial K }{\partial u_k }  } d_k +
329    \displaystyle{\frac{\partial K }{\partial u_{k,i} }  } d_{k,i}+
330    \displaystyle{\frac{\partial F_k }{\partial p} } q d^*_k +
331    \displaystyle{\frac{\partial G_{ki}}{\partial p} } q d^*_{k,i}  
332    \; dx = 0  
333    \end{equation}
334    which is inserted into equation~\ref{REF:EQU:costfunction 100d} to get
335    \begin{equation}\label{REF:EQU:costfunction 602}
336    <q, \nabla_{p} J(p)> =    \int_{\Omega} \left(
337    \displaystyle{\frac{\partial K }{\partial p }  } - \displaystyle{\frac{\partial F_k }{\partial p} } d^*_k
338    - \displaystyle{\frac{\partial G_{ki}}{\partial p} }  d^*_{k,i} \right) q \; dx
339    \end{equation}
340    We need in fact the gradient of $J^f$ with respect to the level set function which is given as
341    \begin{equation}\label{REF:EQU:costfunction 603}
342    <n, \nabla J^f> =    \int_{\Omega} \left(
343    \displaystyle{\frac{\partial K^f}{\partial p^f}  } - \displaystyle{\frac{\partial F^f_k }{\partial p^f} } d^{f*}_k
344    - \displaystyle{\frac{\partial G^f_{ki}}{\partial p^f} }  d^{f*}_{k,i} \right)
345    \cdot \displaystyle{\frac{\partial p^f }{\partial m_l} } n_l \; dx
346    \end{equation}
347    for any increment $n$ to the level set function. So in summary we get  
348    \begin{equation}\label{ref:EQU:CS:101}
349    \begin{array}{rcl}
350      Y_k & = & \displaystyle{\frac{\partial K^{reg}}{\partial m_k}} +
351     \sum_{f} \mu^{data}_{f} \left(
352    \displaystyle{\frac{\partial K^f}{\partial p^f}  } - \displaystyle{\frac{\partial F^f_l }{\partial p^f} } d^{f*}_l
353    - \displaystyle{\frac{\partial G^f_{li}}{\partial p^f} }  d^{f*}_{l,i} \right)
354    \cdot \displaystyle{\frac{\partial p^f }{\partial m_k} }
355    
356    \\
357       X_{ki} & = & \displaystyle{\frac{\partial K^{reg}}{\partial m_{k,i}}}
358    \end{array}
359    \end{equation}
360    to represent $\nabla J$ as the tuple $(Y,X)$. Contributions of the forward model to the
361    Hessian operator are ignored.
362    

Legend:
Removed from v.4141  
changed lines
  Added in v.4142

  ViewVC Help
Powered by ViewVC 1.1.26