\lecture{20}{2023-06-27}{}
\begin{refproof}{ceismartingale}
By the tower property (\autoref{cetower})
it is clear that $(\bE[X | \cF_n])_n$
is a martingale.

First step:
Assume that $X$ is bounded.
Then, by \autoref{cjensen}, $|X_n| \le \bE[|X| | \cF_n]$,
hence $\sup_{\substack{n \in \N \\ \omega \in \Omega}} | X_n(\omega)| < \infty$.
Thus $(X_n)_n$ is a martingale in $L^{\infty} \subseteq L^2$.
By the convergence theorem for martingales in $L^2$
(\autoref{martingaleconvergencel2})
there exists a random variable $Y$
such that $X_n \xrightarrow{L^2} Y$.

Fix $m \in \N$ and $A \in \cF_m$.
Then
\begin{IEEEeqnarray*}{rCl}
\int_A Y \dif \bP
&=& \lim_{n \to \infty} \int_A X_n \dif \bP\\
&=& \lim_{n \to \infty} \bE[X_n \One_A]\\
&=& \lim_{n \to \infty} \bE[\bE[X | \cF_n] \One_A]\\
&\overset{A \in \cF_m \subseteq \cF_n}{=}& \lim_{\substack{n \to \infty\\n \ge m}} \bE[X \One_A]\\
&=& \bE[X \One_A].
\end{IEEEeqnarray*}
Hence $\int_A Y \dif \bP = \int_A X \dif \bP$ for all $m \in \N, A \in \cF_m$.
Since $X$ is measurable with respect to $\sigma\left(\bigcup_n \cF_n\right)$
and $\bigcup_n \cF_n$ is a $\cap$-stable generator of this $\sigma$-algebra,
this holds for all $A \in \sigma(X)$.
Hence $X = Y$ a.s., so $X_n \xrightarrow{L^2} X$.
Since $(X_n)_n$ is uniformly bounded, this also means
$X_n \xrightarrow{L^p} X$.

Second step:
Now let $X \in L^p$ be general and define
\[
X'(\omega) \coloneqq \begin{cases}
X(\omega)& \text{ if } |X(\omega)| \le M,\\
0&\text{ otherwise}
\end{cases}
\]
for some $M > 0$.
Then $X' \in L^\infty$ and
\begin{IEEEeqnarray*}{rCl}
\int | X - X'|^p \dif \bP &=& \int_{\{|X| > M\} } |X|^p \dif \bP \xrightarrow{M \to \infty} 0
\end{IEEEeqnarray*}
as $\bP$ is regular,
i.e.~$\forall \epsilon > 0 . ~\exists k . ~
\bP[|X|^p \le k] \ge 1-\epsilon$.

Take some $\epsilon > 0$ and $M$ large enough such that
\[
\|X - X'\|_{L^p} < \epsilon.
\]

Let $(X_n')_n$ be the martingale given by $(\bE[X' | \cF_n])_n$.
Then $X_n' \xrightarrow{L^p} X'$ by the first step.

It is
\begin{IEEEeqnarray*}{rCl}
\|X_n - X_n'\|_{L^p}^p
&=& \bE[|\bE[X - X' | \cF_n]|^{p}]\\
&\overset{\text{Jensen}}{\le}& \bE[\bE[|X - X'|^{p} | \cF_n]]\\
&=& \|X - X'\|_{L^p}^p\\
&<& \epsilon^p.
\end{IEEEeqnarray*}

Hence, for $n$ large enough such that $\|X_n' - X'\|_{L^p} < \epsilon$,
\[
\|X_n - X\|_{L^p} %
\le \|X_n - X_n'\|_{L^p} + \|X_n' - X'\|_{L^p} + \|X' - X\|_{L^p} %
\le 3 \epsilon.
\]
Thus $X_n \xrightarrow{L^p} X$.
\end{refproof}
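
The following standard example may serve as an illustration of
\autoref{ceismartingale} (the dyadic filtration is chosen here purely
for illustration):
\begin{example}
Take $\Omega = [0,1)$ with the Lebesgue measure $\lambda$ and the dyadic filtration
\[
\cF_n \coloneqq \sigma\left( \left[k 2^{-n}, (k+1) 2^{-n}\right) : 0 \le k < 2^n \right).
\]
For $X \in L^p$, the martingale $\bE[X | \cF_n]$ is constant on each dyadic
interval, where it equals the average of $X$, i.e.
\[
\bE[X | \cF_n] = \sum_{k=0}^{2^n - 1}
\left( 2^n \int_{[k 2^{-n}, (k+1) 2^{-n})} X \dif \lambda \right)
\One_{[k 2^{-n}, (k+1) 2^{-n})}.
\]
These dyadic averages converge to $X$ in $L^p$.
\end{example}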

For the proof of \autoref{martingaleisce},
we need the following theorem, which we won't prove here:
\begin{theorem}[Banach--Alaoglu]
\label{banachalaoglu}
Let $X$ be a normed vector space and $X^\ast$ its
continuous dual.
Then the closed unit ball in $X^\ast$ is compact
w.r.t.~the ${\text{weak}}^\ast$ topology.
\end{theorem}
\begin{fact}
For $\frac{1}{p} + \frac{1}{q} = 1$ with $1 \le q < \infty$,
we have $L^p \cong (L^q)^\ast$
via
\begin{IEEEeqnarray*}{rCl}
L^p &\longrightarrow & (L^q)^\ast \\
f &\longmapsto & (g \mapsto \int g f \dif\bP).
\end{IEEEeqnarray*}
In particular, $(L^1)^\ast \cong L^\infty$;
however $(L^\infty)^\ast \not\cong L^1$.
\end{fact}
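
To get some intuition for the ${\text{weak}}^\ast$ topology,
the following classical example may help (it is not needed later):
\begin{example}
On $([0,1], \cB([0,1]), \lambda)$ consider
$f_n(x) \coloneqq \sin(2 \pi n x)$, viewed as elements of
$L^\infty \cong (L^1)^\ast$.
By the Riemann--Lebesgue lemma,
\[
\int_0^1 f_n g \dif \lambda \xrightarrow{n \to \infty} 0
\text{ for all } g \in L^1,
\]
so $f_n \to 0$ in the ${\text{weak}}^\ast$ topology,
while $\|f_n\|_{L^\infty} = 1$ for all $n$,
so $(f_n)_n$ does not converge in norm.
\end{example}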

\begin{refproof}{martingaleisce}
Since $(X_n)_n$ is bounded in $L^p$, by \autoref{banachalaoglu}
there exists $X \in L^p$ and a subsequence
$(X_{n_k})_k$ such that for all $Y \in L^q$ ($\frac{1}{p} + \frac{1}{q} = 1$)
\[
\int X_{n_k} Y \dif \bP \to \int XY \dif \bP.
\]
(Note that this argument does not work for $p = 1$,
because $(L^\infty)^\ast \not\cong L^1$.)

Let $A \in \cF_m$ for some fixed $m$
and choose $Y = \One_A$.
Then
\begin{IEEEeqnarray*}{rCl}
\int_A X \dif \bP
&=& \lim_{k \to \infty} \int_A X_{n_k} \dif \bP\\
&=& \lim_{k \to \infty} \bE[X_{n_k} \One_A]\\
&\overset{\text{for }n_k \ge m}{=}& \bE[X_m \One_A].
\end{IEEEeqnarray*}
Hence $X_m = \bE[X | \cF_m]$ by the uniqueness of conditional expectation,
and by \autoref{ceismartingale}
we get the convergence.
\end{refproof}

\begin{example}+[\vocab{Branching Process}; Exercise 10.1, 12.4]
Let $(Y_{n,k})_{n \in \N_0, k \in \N}$ be i.i.d.~with values in $\N_0$
such that $0 < \bE[Y_{n,k}] = m < \infty$.
Define
\[
S_0 \coloneqq 1, S_n \coloneqq \sum_{k=1}^{S_{n-1}} Y_{n-1,k}
\]
and let $M_n \coloneqq \frac{S_n}{m^n}$.
$S_n$ models the size of a population.
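For instance, if the $Y_{n,k}$ are Poisson distributed with parameter
$\lambda > 0$ (a concrete choice of offspring distribution, just for
illustration), then $m = \lambda$ and $M_n = \frac{S_n}{\lambda^n}$.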

\begin{claim}
$M_n$ is a martingale.
\end{claim}
\begin{subproof}
We have
\begin{IEEEeqnarray*}{rCl}
\bE[M_{n+1} - M_n | \cF_n]
&=& \frac{1}{m^n} \left( \frac{1}{m}\sum_{k=1}^{S_{n}} \bE[Y_{n,k}] - S_n\right)\\
&=& \frac{1}{m^n}(S_n - S_n) = 0.
\end{IEEEeqnarray*}
\end{subproof}

\begin{claim}
$(M_n)_{n \in \N}$ is bounded in $L^2$ iff $m > 1$.
\end{claim}
\todo{TODO}
\begin{claim}
If $m > 1$ and $M_n \to M_\infty$,
then
\[
\Var(M_\infty) = \sigma^2(m(m-1))^{-1},
\]
where $\sigma^2$ denotes the variance of the offspring distribution.
\end{claim}
\todo{TODO}
\end{example}

\subsection{Stopping Times}

\begin{definition}[Stopping time]
\label{def:stopping-time}
A random variable $T: \Omega \to \N_0 \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time},
if
\[
\{T \le n\} \in \cF_n
\]
for all $n \in \N_0$.
Equivalently, $\{T = n\} \in \cF_n$ for all $n \in \N_0$,
since $\{T = n\} = \{T \le n\} \setminus \{T \le n-1\}$
and $\{T \le n\} = \bigcup_{k=0}^{n} \{T = k\}$.
\end{definition}

\begin{example}
A constant random variable $T = c$ is a stopping time,
as $\{T \le n\}$ is either $\emptyset$ or $\Omega$.
\end{example}

\begin{example}[Hitting times]
For an adapted process $(X_n)_n$
with values in $\R$ and $A \in \cB(\R)$, the \vocab{hitting time}
\[
T \coloneqq \inf \{n \in \N : X_n \in A\}
\]
is a stopping time,
as
\[
\{T \le n \} = \bigcup_{k=1}^n \{X_k \in A\} \in \cF_n.
\]

However, the last exit time
\[
T \coloneqq \sup \{n \in \N : X_n \in A\}
\]
is not a stopping time in general,
as deciding $\{T \le n\}$ requires knowledge of the future of the process.
\end{example}

\begin{example}
Consider the simple random walk, i.e.
$X_n$ i.i.d.~with $\bP[X_n = 1] = \bP[X_n = -1] = \frac{1}{2}$.
Set $S_n \coloneqq \sum_{i=1}^{n} X_i$.
Then, for fixed $A, B \in \R$,
\[
T \coloneqq \inf \{n \in \N : S_n \ge A \lor S_n \le B\}
\]
is a stopping time.
\end{example}

\begin{fact}
If $T_1, T_2$ are stopping times with respect to the same filtration,
then
\begin{itemize}
\item $T_1 + T_2$,
\item $\min \{T_1, T_2\}$ and
\item $\max \{T_1, T_2\}$
\end{itemize}
are stopping times.
\end{fact}
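These follow directly from the definition of a stopping time; for instance
\[
\{\min \{T_1, T_2\} \le n\} = \{T_1 \le n\} \cup \{T_2 \le n\} \in \cF_n,
\qquad
\{\max \{T_1, T_2\} \le n\} = \{T_1 \le n\} \cap \{T_2 \le n\} \in \cF_n,
\]
and
\[
\{T_1 + T_2 = n\} = \bigcup_{k=0}^{n} \left( \{T_1 = k\} \cap \{T_2 = n-k\} \right) \in \cF_n,
\]
since $\{T_1 = k\} \in \cF_k \subseteq \cF_n$ and $\{T_2 = n-k\} \in \cF_{n-k} \subseteq \cF_n$.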
\begin{warning}
Note that $T_1 - T_2$ is in general not a stopping time.
\end{warning}

\begin{remark}
There are two ways to look at the interaction between a stopping time $T$
and a stochastic process $(X_n)_n$:
\begin{itemize}
\item The behaviour of $X_n$ until $T$, i.e.
\[
X^T \coloneqq \left(X_{T \wedge n}\right)_{n \in \N},
\]
is called the \vocab{stopped process}.
\item The value of $(X_n)_n$ at time $T$,
i.e.~looking at $X_T$.
\end{itemize}
\end{remark}
\begin{example}
If we look at a process
\[ S_n = \sum_{i=1}^{n} X_i \]
for some $(X_n)_n$,
then
\[ S^T = \left( \sum_{i=1}^{T \wedge n} X_i \right)_{n} \]
and
\[ S_T = \sum_{i=1}^{T} X_i. \]
\end{example}

\begin{theorem}
If $(X_n)_n$ is a supermartingale and $T$ is a stopping time,
then $X^T$ is also a supermartingale,
and we have $\bE[X_{T \wedge n}] \le \bE[X_0]$ for all $n$.
If $(X_n)_n$ is a martingale, then so is $X^T$
and $\bE[X_{T \wedge n}] = \bE[X_0]$.
\end{theorem}
\begin{proof}
First, we need to show that $X^T$ is adapted.
This is clear since
\begin{IEEEeqnarray*}{rCl}
X^T_n &=& X_T \One_{\{T < n\}} + X_n \One_{\{T \ge n\}}\\
&=& \sum_{k=0}^{n-1} X_k \One_{\{T = k\}} + X_n \One_{\{T \ge n\}}.
\end{IEEEeqnarray*}

It is also clear that $X^T_n$ is integrable since
\[
\bE[|X^T_n|] \le \sum_{k=0}^{n} \bE[|X_k|] < \infty.
\]

We have
\begin{IEEEeqnarray*}{rCl}
&&\bE[X^T_n - X^T_{n-1} | \cF_{n-1}]\\
&=& \bE\left[X_n \One_{\{T \ge n\}} + \sum_{k=0}^{n-1} X_k \One_{\{ T = k\} }
- X_{n-1}(\One_{\{T \ge n\}} + \One_{\{T = n-1\}})\right.\\
&&\left.- \sum_{k=0}^{n-2} X_k \One_{\{T = k\} } \middle| \cF_{n-1}\right]\\
&=& \bE[(X_n - X_{n-1}) \One_{\{ T \ge n\} } | \cF_{n-1}]\\
&=& \One_{\{ T \ge n\}} (\bE[X_n | \cF_{n-1}] - X_{n-1})
\begin{cases}
\le 0\\
= 0 \text{ if $(X_n)_n$ is a martingale},
\end{cases}
\end{IEEEeqnarray*}
where we used that $\{T \ge n\} = \{T \le n-1\}^c \in \cF_{n-1}$.
\end{proof}

\begin{remark}
\label{roptionalstoppingi}
We now want a similar statement for $X_T$.
In the case that $T \le M$ is bounded,
we get from the above that
\[
\bE[X_T] \overset{n \ge M}{=} \bE[X^T_n] \begin{cases}
\le \bE[X_0] & \text{ supermartingale},\\
= \bE[X_0] & \text{ martingale}.
\end{cases}
\]
However if $T$ is not bounded, this does not hold in general.
\end{remark}
\begin{example}
Let $(S_n)_n$ be the simple random walk
and take $T \coloneqq \inf \{n : S_n = 1\}$.
Then $\bP[T < \infty] = 1$, but
\[
1 = \bE[S_T] \neq \bE[S_0] = 0.
\]
Note that here $\bE[T] = \infty$ and $(S_n)_n$ is unbounded,
so none of the conditions of \autoref{optionalstopping} below holds.
\end{example}

\begin{theorem}[Optional Stopping]
\label{optionalstopping}
Let $(X_n)_n$ be a supermartingale
and let $T$ be a stopping time
taking values in $\N_0 \cup \{\infty\}$.

If one of the following holds:
\begin{enumerate}[(i)]
\item $T \le M$ is bounded,
\item $(X_n)_n$ is uniformly bounded
and $T < \infty$ a.s.,
\item $\bE[T] < \infty$
and $|X_n(\omega) - X_{n-1}(\omega)| \le K$
for all $n \in \N, \omega \in \Omega$ and
some $K > 0$,
\end{enumerate}
then $\bE[X_T] \le \bE[X_0]$.

If $(X_n)_n$ is even a martingale, then
under the same conditions
$\bE[X_T] = \bE[X_0]$.
\end{theorem}
\begin{proof}
(i) was already done in \autoref{roptionalstoppingi}.

(ii): Since $(X_n)_n$ is uniformly bounded and $T < \infty$ a.s., we get that
\begin{IEEEeqnarray*}{rCl}
\bE[X_T - X_0] &\overset{\text{dominated convergence}}{=}& \lim_{n \to \infty} \bE[X_{T \wedge n} - X_0]\\
&\overset{\text{part (i)}}{\le}& 0.
\end{IEEEeqnarray*}

(iii): It is
\begin{IEEEeqnarray*}{rCl}
|X_{T \wedge n}- X_0| &\le& \sum_{k=1}^{T \wedge n} |X_k - X_{k-1}|\\
&\le & (T \wedge n) \cdot K\\
&\le & T \cdot K,
\end{IEEEeqnarray*}
and $T \cdot K$ is integrable, as $\bE[T] < \infty$.

Hence, we can apply dominated convergence and obtain
\begin{IEEEeqnarray*}{rCl}
\bE[X_T - X_0] &=& \lim_{n \to \infty} \bE[X_{T \wedge n} - X_0]\\
&\overset{\text{part (i)}}{\le}& 0.
\end{IEEEeqnarray*}

The statement about martingales follows from
applying this to $(X_n)_n$ and $(-X_n)_n$,
which are both supermartingales.
\end{proof}
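
\begin{example}
As a standard application of \autoref{optionalstopping},
let $(S_n)_n$ be the simple random walk and, for $a, b \in \N$, consider
\[
T \coloneqq \inf \{n \in \N : S_n = a \lor S_n = -b\}.
\]
One can check that $\bE[T] < \infty$, and the increments of $(S_n)_n$
are bounded by $K = 1$, so condition (iii) applies to the martingale
$(S_n)_n$ and yields
\[
0 = \bE[S_0] = \bE[S_T] = a \bP[S_T = a] - b \left(1 - \bP[S_T = a]\right),
\]
hence $\bP[S_T = a] = \frac{b}{a+b}$.
\end{example}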