\chapter{Regularization}\label{Chp:ref:regularization}

The general cost function $J^{total}$ to be minimized is the sum of the cost
function $J^f$, which measures the defect of the result of the forward model
against the data, and the cost function $J^{reg}$, which introduces
regularization into the problem and ensures that a unique answer exists.
The regularization term is a function of a possibly vector-valued level set
function $m$ which represents the physical properties and is,
from a mathematical point of view, the unknown of the inversion problem.
The values of $m$ are intended to lie between zero and one, and the
actual physical values are created from $m$ by a mapping before being fed into the
forward model. In general the cost function $J^{reg}$ is defined as
\begin{equation}\label{EQU:REG:1}
J^{reg}(m) = \frac{1}{2} \int_{\Omega} \left(
 \sum_{k} \mu_k \cdot ( \omega^{(0)}_k \cdot m_k^2 + \omega^{(1)}_{ki} m_{k,i}^2 )
+  \sum_{l<k} \mu^{(c)}_{lk} \cdot \omega^{(c)}_{lk}  \cdot  \chi(m_l,m_k) \right) \; dx
\end{equation}
where summation over $i$ is performed. The additional trade-off factors
$\mu_k$ and $\mu^{(c)}_{lk}$ ($l<k$) are between zero and one and constant across the
domain. They are potentially modified during the inversion in order to improve the balance
between the different terms in the cost function.

$\chi$ is a given symmetric, non-negative cross-gradient function\index{cross-gradient}. We use
\begin{equation}\label{EQU:REG:4}
 \chi(a,b) =  ( a_{,i} a_{,i}) \cdot ( b_{,j} b_{,j}) -   ( a_{,i} b_{,i})^2
\end{equation}
where summations over $i$ and $j$ are performed. Notice that the cross-gradient function
measures the angle between the surface normals of the contours of the level set functions, so
minimizing the cost function will align the surface normals of the contours.

The coefficients $\omega^{(0)}_k$, $\omega^{(1)}_{ki}$ and $\omega^{(c)}_{lk}$ define weighting factors which
may depend on their location within the domain. We assume that for a given level set function $k$ the
weighting factors $\omega^{(0)}_k$ and $\omega^{(1)}_{ki}$ are scaled such that
\begin{equation}\label{ref:EQU:REG:5}
\int_{\Omega} ( \omega^{(0)}_k  + \frac{\omega^{(1)}_{ki}}{L_i^2} ) \; dx = \alpha_k
\end{equation}
where $\alpha_k$ defines the scale which is typically set to one, and $L_i$ is the width of the domain in the $x_i$ direction.
Similarly, for $l<k$ we set
\begin{equation}\label{ref:EQU:REG:6}
\int_{\Omega} \frac{\omega^{(c)}_{lk}}{L^4} \; dx = \alpha^{(c)}_{lk}
\end{equation}
where $\alpha^{(c)}_{lk}$ defines the scale which is typically set to one and
\begin{equation}\label{ref:EQU:REG:6b}
\frac{1}{L^2} = \sum_i \frac{1}{L_i^2} \;.
\end{equation}
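As a simple illustration of these scaling conditions (not part of the class interface), assume a single level set
function with $\omega^{(0)}=0$ and a constant gradient weighting $\omega^{(1)}_{i}=\omega$ on a domain of volume $|\Omega|$.
Condition~(\ref{ref:EQU:REG:5}) together with definition~(\ref{ref:EQU:REG:6b}) then reads
\begin{equation}
\omega \cdot |\Omega| \cdot \sum_i \frac{1}{L_i^2} = \frac{\omega \cdot |\Omega|}{L^2} = \alpha
\quad \mbox{ and therefore } \quad
\omega = \frac{\alpha \cdot L^2}{|\Omega|} \; ,
\end{equation}
which is the value to which a constant gradient weighting is effectively rescaled.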

In some cases the values of the level set functions are known to be zero in certain regions of the domain, typically the region
above the surface of the Earth. This is expressed using a
characteristic function $q$ which varies with its location within the domain. The function $q$ is set to zero except for those
locations $x$ within the domain where the values of the level set functions are known to be zero; for these locations $x$,
$q$ takes a positive value. For a single level set function one has
53    \begin{equation}\label{ref:EQU:REG:7}
54    q(x) = \left\{
55    \begin{array}{rl}
56      1 & \mbox{ if } m \mbox{ is set to zero at location } x \\
57      0 & \mbox{ otherwise }
58    \end{array}
59    \right.
60    \end{equation}
For a multi-valued level set function the characteristic function is set componentwise:
\begin{equation}\label{ref:EQU:REG:7b}
q_k(x) = \left\{
\begin{array}{rl}
  1 & \mbox{ if component } m_k \mbox{ is set to zero at location } x \\
  0 & \mbox{ otherwise }
\end{array}
\right.
\end{equation}
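In a script driving an inversion, such a characteristic function can be assembled from the domain coordinates.
The following lines are a sketch only; they assume a three-dimensional \member{domain} object that has been created elsewhere
and a surface of the Earth located at $x_2=0$ with the air above:
\begin{verbatim}
from esys.escript import whereNonNegative

x = domain.getX()            # coordinates of the data points of the domain
q = whereNonNegative(x[2])   # q=1 above the surface, q=0 below
\end{verbatim}
The resulting \texttt{q} can then be passed as \member{location_of_set_m} to the \class{Regularization} class described below.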
\section{Usage}

The regularization component of the cost function is set up through the \class{Regularization} class.
A small usage sketch is given after the class description below.
\begin{classdesc}{Regularization}{domain
        \optional{, w0=\None}
        \optional{, w1=\None}
        \optional{, wc=\None}
        \optional{, location_of_set_m=Data()}
        \optional{, numLevelSets=1}
        \optional{, useDiagonalHessianApproximation=\False}
        \optional{, tol=1e-8}
        \optional{, scale=\None}
        \optional{, scale_c=\None}
        }

initializes a regularization component of the cost function for inversion.
\member{domain} defines the domain of the inversion. \member{numLevelSets}
sets the number of level set functions to be found during the inversion.
\member{w0}, \member{w1} and \member{wc} define the weighting factors
$\omega^{(0)}$, $\omega^{(1)}$ and $\omega^{(c)}$, respectively.
A value for \member{w0} or \member{w1} or both must be given.
If more than one level set function is involved, \member{wc} must be given.
\member{location_of_set_m} sets the characteristic function $q$
which defines the locations where the level set function is set to zero, see equation~(\ref{ref:EQU:REG:7}).
\member{scale} and \member{scale_c} set the scales $\alpha_k$ in equation~(\ref{ref:EQU:REG:5}) and
$\alpha^{(c)}_{lk}$ in equation~(\ref{ref:EQU:REG:6}), respectively. By default, their values are set to one.
Notice that the weighting factors are rescaled to meet the scaling conditions. \member{tol} sets the
tolerance for the calculation of the Hessian approximation. \member{useDiagonalHessianApproximation}
indicates that the coupling introduced into the Hessian approximation by the
cross-gradient term is ignored. This can speed up an individual iteration step in the inversion
but typically leads to more inversion steps.
\end{classdesc}
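As a guide only, the following sketch shows how a \class{Regularization} object could be instantiated for a single level set
function on a three-dimensional domain, using the characteristic function \texttt{q} constructed in the sketch above. The
numerical values of the weighting factors and the use of a plain list for \member{w1} are illustrative assumptions, not
prescriptions:
\begin{verbatim}
from esys.downunder import Regularization

regularization = Regularization(domain,
                                numLevelSets=1,
                                w1=[1., 1., 1.],      # omega^(1), one value per direction
                                location_of_set_m=q)  # fix the level set function above the surface
\end{verbatim}
The resulting object is typically not used on its own but is passed, together with one or more forward models, to the class
that assembles the total cost function $J^{total}$ for the inversion.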

\section{Gradient Calculation}

The cost function kernel\index{cost function!kernel} is given as
\begin{equation}\label{ref:EQU:REG:100}
K^{reg}(m) = \frac{1}{2} \left(
\sum_{k} \mu_k \cdot ( \omega^{(0)}_k \cdot m_k^2 + \omega^{(1)}_{ki} m_{k,i}^2 )
+  \sum_{l<k} \mu^{(c)}_{lk} \cdot \omega^{(c)}_{lk}  \cdot  \chi(m_l,m_k) \right)
\end{equation}
such that $J^{reg}(m) = \int_{\Omega} K^{reg}(m) \; dx$, see equation~(\ref{EQU:REG:1}).
We need to provide the gradient of the cost function $J^{reg}$ with respect to the level set functions $m$.
The gradient is represented by two functions $Y$ and $X$ which define the
derivative of the cost function kernel with respect to $m$ and to the gradient $m_{,i}$, respectively:
\begin{equation}\label{ref:EQU:REG:101}
\begin{array}{rcl}
 Y_k & = & \displaystyle{\frac{\partial K^{reg}}{\partial m_k}} \\
 X_{ki} & = & \displaystyle{\frac{\partial K^{reg}}{\partial m_{k,i}}}
\end{array}
\end{equation}
For the case of a single-valued level set function $m$ we get
\begin{equation}\label{ref:EQU:REG:202}
Y = \mu \cdot \omega^{(0)} \cdot m
\end{equation}
and
\begin{equation}\label{ref:EQU:REG:203}
 X_{i} = \mu \cdot \omega^{(1)}_{i} \cdot m_{,i}
\end{equation}
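In \escript these expressions are easily assembled from the level set function. The following lines are an illustration only,
with \texttt{mu} a constant trade-off factor and \texttt{w0}, \texttt{w1} assumed to be scalar and vector-valued \escript data
objects holding the weighting factors; this is not the actual implementation of the \class{Regularization} class:
\begin{verbatim}
from esys.escript import grad

gm = grad(m)         # the gradient m_{,i} of the level set function
Y  = mu * w0 * m     # derivative of the kernel with respect to m
X  = mu * w1 * gm    # derivative with respect to m_{,i} (componentwise product)
\end{verbatim}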
For a two-valued level set function $(m_0,m_1)$ we have
\begin{equation}\label{ref:EQU:REG:302}
Y_k = \mu_k \cdot \omega^{(0)}_k \cdot m_k \mbox{ for } k=0,1
\end{equation}
and for $X$
\begin{equation}\label{ref:EQU:REG:303}
\begin{array}{rcl}
 X_{0i} & = & \mu_0 \cdot \omega^{(1)}_{0i} \cdot m_{0,i} + \mu^{(c)}_{01} \cdot \omega^{(c)}_{01} \cdot
\left( (m_{1,j} m_{1,j} ) \cdot m_{0,i} - (m_{1,j} m_{0,j} ) \cdot m_{1,i} \right) \\
 X_{1i} & = & \mu_1 \cdot \omega^{(1)}_{1i} \cdot m_{1,i} + \mu^{(c)}_{01} \cdot \omega^{(c)}_{01} \cdot
\left( (m_{0,j} m_{0,j} ) \cdot m_{1,i} - (m_{1,j} m_{0,j} ) \cdot m_{0,i} \right)
\end{array}
\end{equation}
149  \begin{equation}\label{EQU:REG:8}  We also need to provide an approximation of the inverse of the Hessian operator. The operator evaluation is executes as a solution
150   < p,   h > = [p, r]  of a linear PDE which is solved using \escript \class{LinearPDE} class. In the \escript notation we need to provide
151    \begin{equation}\label{ref:EQU:REG:600}
152    \begin{array}{rcl}
153     A_{kilj} & = & \displaystyle{\frac{\partial X_{ki}}{\partial m_{l,j}}} \\
154    D_{kl} & =  &  \displaystyle{\frac{\partial Y_{k}}{\partial m_{l}}}
155    \end{array}
156  \end{equation}  \end{equation}
For the case of a single-valued level set function $m$ we get
\begin{equation}\label{ref:EQU:REG:601}
\begin{array}{rcl}
 A_{ij} & = &  \mu \cdot \omega^{(1)}_i \cdot \delta_{ij}  \\
 D & = &  \mu \cdot \omega^{(0)}
\end{array}
\end{equation}
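For the single level set function case the corresponding PDE could be set up along the following lines. This is a sketch
under the same assumptions as above (scalar \texttt{w0}, vector-valued \texttt{w1}, constant \texttt{mu}, and identical
gradient weights in all directions) and is not the implementation used inside the \class{Regularization} class:
\begin{verbatim}
from esys.escript.linearPDEs import LinearPDE
from esys.escript import kronecker

pde = LinearPDE(domain, numEquations=1)
pde.setValue(A=mu * w1[0] * kronecker(domain),  # assumes identical weights in all directions
             D=mu * w0,
             q=q)                               # level set function fixed where q is positive
# before each solve the right hand side is set from the gradient (Y, X):
# pde.setValue(Y=Y, X=X); h = pde.getSolution()
\end{verbatim}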
For a two-valued level set function $(m_0,m_1)$ we have
\begin{equation}\label{ref:EQU:REG:602}
D_{kl}  =   \mu_k \cdot \omega^{(0)}_k \cdot \delta_{kl}
\end{equation}
and
\begin{equation}\label{ref:EQU:REG:603}
\begin{array}{rcl}
A_{0i0j} & = & \mu_0 \cdot \omega^{(1)}_{0i} \cdot \delta_{ij} + \mu^{(c)}_{01} \cdot \omega^{(c)}_{01} \cdot
\left( (m_{1,j'} m_{1,j'} ) \cdot \delta_{ij}  -  m_{1,i} \cdot m_{1,j} \right)    \\
A_{0i1j} & = & \mu^{(c)}_{01} \cdot \omega^{(c)}_{01} \cdot \left( 2 \cdot m_{0,i} \cdot  m_{1,j}
- m_{1,i} \cdot  m_{0,j} - ( m_{1,j'} m_{0,j'} ) \cdot  \delta_{ij} \right)  \\
A_{1i0j} & = & \mu^{(c)}_{01} \cdot \omega^{(c)}_{01} \cdot \left( 2 \cdot m_{1,i} \cdot  m_{0,j}
- m_{0,i} \cdot  m_{1,j} - ( m_{1,j'} m_{0,j'} ) \cdot  \delta_{ij} \right)  \\
A_{1i1j} & = &  \mu_1 \cdot \omega^{(1)}_{1i} \cdot \delta_{ij} + \mu^{(c)}_{01} \cdot \omega^{(c)}_{01} \cdot
\left( (m_{0,j'} m_{0,j'} ) \cdot \delta_{ij}  -  m_{0,i} \cdot m_{0,j} \right)
\end{array}
\end{equation}
