lecture 21

This commit is contained in:
Josia Pietsch 2023-06-29 20:31:46 +02:00
parent 62fc7f892d
commit d0b12bd4a9
Signed by: josia
GPG Key ID: E70B571D66986A2D
5 changed files with 162 additions and 21 deletions

View File

@ -16,7 +16,7 @@ First, let us recall some basic definitions:
\item $\bP$ is a \vocab{probability measure}, i.e.~$\bP$ is a function $\bP: \cF \to [0,1]$
such that
\begin{itemize}
\item $\bP(\emptyset) = 1$, $\bP(\Omega) = 1$,
\item $\bP(\emptyset) = 0$, $\bP(\Omega) = 1$,
\item $\bP\left( \bigsqcup_{n \in \N} A_n \right) = \sum_{n \in \N} \bP(A_n)$
for mutually disjoint $A_n \in \cF$.
\end{itemize}

View File

@ -116,7 +116,7 @@ we need the following theorem, which we won't prove here:
\subsection{Stopping times}
\begin{definition}[Stopping time]
A random variable $T: \Omega \to \N \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time},
A random variable $T: \Omega \to \N_0 \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time},
if
\[
\{T \le n\} \in \cF_n

136
inputs/lecture_21.tex Normal file
View File

@ -0,0 +1,136 @@
\lecture{21}{2023-06-29}{}
% TODO: replace bf
This is the last lecture relevant for the exam.
(Apart from lecture 22 which will be a repetition).
\begin{goal}
We want to see an application of the
optional stopping theorem \ref{optionalstopping}.
\end{goal}
\begin{notation}
Let $E$ be a complete, separable metric space (e.g.~$E = \R$).
Suppose that for all $x \in E$ we have a probability measure
$\bfP(x, \dif y)$ on $E$.
% i.e. $\mu(A) \coloneqq \int_A \bP(x, \dif y)$ is a probability measure.
Such a probability measure is called
a \vocab{transition probability measure}.
\end{notation}
\begin{example}
$E =\R$,
\[\bfP(x, \dif y) = \frac{1}{\sqrt{2 \pi} } e^{- \frac{(x-y)^2}{2}} \dif y\]
is a transition probability measure.
\end{example}
\begin{example}[Simple random walk as a transition probability measure]
$E = \Z$, $\bfP(x, \dif y)$
assigns mass $\frac{1}{2}$ to $y = x+1$ and $y = x -1$.
\end{example}
\begin{definition}
For every bounded, measurable function $f : E \to \R$,
$x \in E$
define
\[
(\bfP f)(x) \coloneqq \int_E f(y) \bfP(x, \dif y).
\]
This $\bfP$ is called a \vocab{transition operator}.
\end{definition}
\begin{fact}
If $f \ge 0$, then $(\bfP f)(\cdot ) \ge 0$.
If $f \equiv 1$, we have $(\bfP f) \equiv 1$.
\end{fact}
\begin{notation}
Let $\bfI$ denote the \vocab{identity operator},
i.e.
\[
(\bfI f)(x) = f(x)
\]
for all $f$.
Then for a transition operator $\bfP$ we write
\[
\bfL \coloneqq \bfI - \bfP.
\]
\end{notation}
\begin{goal}
Take $E = \R$.
Suppose that $A^c \subseteq \R$ is a bounded domain.
Given a bounded function $f$ on $\R$,
we want a function $u$ which is bounded,
such that
$\bfL u = 0$ on $A^c$ and $u = f$ on $A$.
\end{goal}
We will show that $u(x) = \bE_x[f(X_{T_A})]$
is the unique solution to this problem.
\begin{definition}
Let $(\Omega, \cF, \{\cF_n\}_n, \bP_x)$
be a filtered probability space, where for every $x \in \R$,
$\bP_x$ is a probability measure.
Let $\bE_x$ denote expectation with respect to $\bP_x$.
Then $(X_n)_{n \ge 0}$ is a \vocab{Markov chain} starting at $x \in \R$
with \vocab[Markov chain!Transition probability]{transition probability}
$\bfP(x, \cdot )$ if
\begin{enumerate}[(i)]
\item $\bP_x[X_0 = x] = 1$,
\item for all bounded, measurable $f: \R \to \R$,
\[\bE_x[f(X_{n+1}) | \cF_n] \overset{\text{a.s.}}{=}%
\bE_{x}[f(X_{n+1}) | X_n] = %
\int f(y) \bfP(X_n, \dif y).\]
\end{enumerate}
(Recall $\cF_n = \sigma(X_1,\ldots, X_n)$.)
\end{definition}
\begin{example}
Suppose $B \in \cB(\R)$ and $f = \One_B$.
Then the first equality of (ii) simplifies to
\[
\bP_x[X_{n+1} \in B | \cF_n] = \bP_x[X_{n+1} \in B | \sigma(X_n)].
\]
\end{example}
\begin{definition}[Conditional probability]
\[
\bP[A | \cG] \coloneqq \bE[\One_A | \cG].
\]
\end{definition}
\begin{example}
Let $\xi_i$ be i.i.d.~with $\bP[\xi_i = 1] = \bP[\xi_i = -1] = \frac{1}{2}$
and define $X_n \coloneqq \sum_{i=1}^{n} \xi_i$.
Intuitively, conditioned on $X_n$, $X_{n+1}$ should
be independent of $\sigma(X_1,\ldots, X_{n-1})$.
For a set $B$, we have
\[
\bP_0[X_{n+1} \in B| \sigma(X_1,\ldots, X_n)]
= \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_1,\ldots, X_n)]
= \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_n)].
\]
\begin{claim}
$\bE[\One_{X_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_{n+1} \in B} | \sigma(X_n)]$.
\end{claim}
\begin{subproof}
The rest of the lecture was very chaotic...
\end{subproof}
\end{example}
%TODO
{ \huge\color{red}
New information after this point is not relevant for the exam.
}
Stopping times and optional stopping are very relevant for the exam,
the Markov property is not.

View File

@ -40,6 +40,10 @@
\RequirePackage{mkessler-faktor}
\RequirePackage{mkessler-mathsymb}
\RequirePackage[extended]{mkessler-mathalias}
% \makeatletter
% \expandafter\MakeAliasesForwith\expandafter\mathbf\expandafter{\expandafter bf\expandafter}\expandafter{\mkessler@mathalias@all}
% \makeatother
\RequirePackage{mkessler-refproof}
% mkessler-mathfont has already been imported

View File

@ -44,6 +44,7 @@
\input{inputs/lecture_18.tex}
\input{inputs/lecture_19.tex}
\input{inputs/lecture_20.tex}
\input{inputs/lecture_21.tex}
\cleardoublepage