From d0b12bd4a9addf321e3d603597160807db0cc549 Mon Sep 17 00:00:00 2001 From: Josia Pietsch Date: Thu, 29 Jun 2023 20:31:46 +0200 Subject: [PATCH] lecture 21 --- inputs/lecture_1.tex | 2 +- inputs/lecture_20.tex | 40 ++++++------ inputs/lecture_21.tex | 136 +++++++++++++++++++++++++++++++++++++++++ jrpie-math.sty | 4 ++ probability_theory.tex | 1 + 5 files changed, 162 insertions(+), 21 deletions(-) create mode 100644 inputs/lecture_21.tex diff --git a/inputs/lecture_1.tex b/inputs/lecture_1.tex index 02f89c6..bc0ad45 100644 --- a/inputs/lecture_1.tex +++ b/inputs/lecture_1.tex @@ -16,7 +16,7 @@ First, let us recall some basic definitions: \item $\bP$ is a \vocab{probability measure}, i.e.~$\bP$ is a function $\bP: \cF \to [0,1]$ such that \begin{itemize} - \item $\bP(\emptyset) = 1$, $\bP(\Omega) = 1$, + \item $\bP(\emptyset) = 0$, $\bP(\Omega) = 1$, \item $\bP\left( \bigsqcup_{n \in \N} A_n \right) = \sum_{n \in \N} \bP(A_n)$ for mutually disjoint $A_n \in \cF$. \end{itemize} diff --git a/inputs/lecture_20.tex b/inputs/lecture_20.tex index 711f6a7..b3102e7 100644 --- a/inputs/lecture_20.tex +++ b/inputs/lecture_20.tex @@ -2,7 +2,7 @@ By the tower property (\autoref{cetower}) it is clear that $(\bE[X | \cF_n])_n$ is a martingale. - + First step: Assume that $X$ is bounded. Then, by \autoref{cejensen}, $|X_n| \le \bE[|X| | \cF_n]$, @@ -84,7 +84,7 @@ we need the following theorem, which we won't prove here: L^p &\longrightarrow & (L^q)^\ast \\ f &\longmapsto & (g \mapsto \int g f \dif d\bP) \end{IEEEeqnarray*} - + We also have $(L^1)^\ast \cong L^\infty$, however $ (L^\infty)^\ast \not\cong L^1$. \end{fact} @@ -95,7 +95,7 @@ we need the following theorem, which we won't prove here: $(X_{n_k})_k$ such that for all $Y \in L^q$ ($\frac{1}{p} + \frac{1}{q} = 1$ ) \[ \int X_{n_k} Y \dif \bP \to \int XY \dif \bP - \] + \] (Note that this argument does not work for $p = 1$, because $(L^\infty)^\ast \not\cong L^1$). 
@@ -116,14 +116,14 @@ we need the following theorem, which we won't prove here: \subsection{Stopping times} \begin{definition}[Stopping time] - A random variable $T: \Omega \to \N \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time}, + A random variable $T: \Omega \to \N_0 \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time}, if \[ \{T \le n\} \in \cF_n - \] + \] for all $n \in \N$. Equivalently, $\{T = n\} \in \cF_n$ for all $n \in \N$. - + \end{definition} \begin{example} @@ -131,21 +131,21 @@ we need the following theorem, which we won't prove here: \end{example} \begin{example}[Hitting times] - For an adapted process $(X_n)_n$ + For an adapted process $(X_n)_n$ with values in $\R$ and $A \in \cB(\R)$, the \vocab{hitting time} \[ - T \coloneqq \inf \{n \in \N : X_n \in A\} + T \coloneqq \inf \{n \in \N : X_n \in A\} \] is a stopping time, as \[ \{T \le n \} = \bigcup_{k=1}^n \{X_k \in A\} \in \cF_n. - \] + \] However, the last exit time \[ - T \coloneqq \sup \{n \in \N : X_n \in A\} - \] + T \coloneqq \sup \{n \in \N : X_n \in A\} + \] is not a stopping time. \end{example} @@ -158,7 +158,7 @@ we need the following theorem, which we won't prove here: Then \[ T \coloneqq \inf \{n \in \N : S_n \ge A \lor S_n \le B\} - \] + \] is a stopping time. \end{example} @@ -173,11 +173,11 @@ we need the following theorem, which we won't prove here: are stopping times. Note that $T_1 - T_2$ is not a stopping time. - + \end{example} \begin{remark} - There are two ways to interpret the interaction between a stopping time $T$ + There are two ways to interpret the interaction between a stopping time $T$ and a stochastic process $(X_n)_n$. 
\begin{itemize} \item The behaviour of $ X_n$ until $T$, @@ -193,22 +193,22 @@ we need the following theorem, which we won't prove here: If we look at a process \[ S_n = \sum_{i=1}^{n} X_i - \] + \] for some $(X_n)_n$, then \[ S^T = (\sum_{i=1}^{T \wedge n} X_i)_n - \] + \] and \[ S_T = \sum_{i=1}^{T} X_i. - \] + \] \end{example} \begin{theorem} If $(X_n)_n$ is a supermartingale and $T$ is a stopping time, then $X^T$ is also a supermartingale, and we have $\bE[X_{T \wedge n}] \le \bE[X_0]$ for all $n$. - If $(X_n)_n$ is a martingale, then so is $X^T$ + If $(X_n)_n$ is a martingale, then so is $X^T$ and $\bE[X_{T \wedge n}] \le \bE[X_0]$. \end{theorem} \begin{proof} @@ -222,7 +222,7 @@ we need the following theorem, which we won't prove here: It is also clear that $X^T_n$ is integrable since \[ \bE[|X^T_n|] \le \sum_{k=1}^{n} \bE[|X_k|] < \infty. - \] + \] We have \begin{IEEEeqnarray*}{rCl} @@ -259,7 +259,7 @@ we need the following theorem, which we won't prove here: Then $\bP[T < \infty] = 1$, but \[ 1 = \bE[S_T] \neq \bE[S_0] = 0. - \] + \] \end{example} \begin{theorem}[Optional Stopping] diff --git a/inputs/lecture_21.tex b/inputs/lecture_21.tex new file mode 100644 index 0000000..4475ba0 --- /dev/null +++ b/inputs/lecture_21.tex @@ -0,0 +1,136 @@ +\lecture{21}{2023-06-29}{} +% TODO: replace bf + +This is the last lecture relevant for the exam. +(Apart from lecture 22 which will be a repetition). + +\begin{goal} + We want to see an application of the + optional stopping theorem \ref{optionalstopping}. +\end{goal} + +\begin{notation} + Let $E$ be a complete, separable metric space (e.g.~$E = \R$). + Suppose that for all $x \in E$ we have a probability measure + $\bfP(x, \dif y)$ on $E$. + % i.e. $\mu(A) \coloneqq \int_A \bP(x, \dif y)$ is a probability measure. + Such a probability measure is called + a \vocab{transition probability measure}. 
+\end{notation} +\begin{example} + $E =\R$, + \[\bfP(x, \dif y) = \frac{1}{\sqrt{2 \pi} } e^{- \frac{(x-y)^2}{2}} \dif y\] + is a transition probability measure. +\end{example} +\begin{example}[Simple random walk as a transition probability measure] + $E = \Z$, $\bfP(x, \dif y)$ + assigns mass $\frac{1}{2}$ to $y = x+1$ and $y = x -1$. +\end{example} + +\begin{definition} + For every bounded, measurable function $f : E \to \R$, + $x \in E$ + define + \[ + (\bfP f)(x) \coloneqq \int_E f(y) \bfP(x, \dif y). + \] + This $\bfP$ is called a \vocab{transition operator}. +\end{definition} +\begin{fact} + If $f \ge 0$, then $(\bfP f)(\cdot ) \ge 0$. + + If $f \equiv 1$, we have $(\bfP f) \equiv 1$. +\end{fact} + +\begin{notation} + Let $\bfI$ denote the \vocab{identity operator}, + i.e. + \[ + (\bfI f)(x) = f(x) + \] + for all $f$. + Then for a transition operator $\bfP$ we write + \[ + \bfL \coloneqq \bfI - \bfP. + \] +\end{notation} + +\begin{goal} +Take $E = \R$. +Suppose that $A^c \subseteq \R$ is a bounded domain. +Given a bounded function $f$ on $\R$, +we want a function $u$ which is bounded, +such that +$Lu = 0$ on $A^c$ and $u = f$ on $A$. +\end{goal} + +We will show that $u(x) = \bE_x[f(X_{T_A})]$ +is the unique solution to this problem. + +\begin{definition} + Let $(\Omega, \cF, \{\cF_n\}_n, \bP_x)$ + be a filtered probability space, where for every $x \in \R$, + $\bP_x$ is a probability measure. + Let $\bE_x$ denote expectation with respect to $\bfP(x, \cdot )$. + Then $(X_n)_{n \ge 0}$ is a \vocab{Markov chain} starting at $x \in \R$ + with \vocab[Markov chain!Transition probability]{transition probability} + $\bfP(x, \cdot )$ if + \begin{enumerate}[(i)] + \item $\bP_x[X_0 = x] = 1$, + \item for all bounded, measurable $f: \R \to \R$, + \[\bE_x[f(X_{n+1}) | \cF_n] \overset{\text{a.s.}}{=}% + \bE_{x}[f(X_{n+1}) | X_n] = % + \int f(y) \bfP(X_n, \dif y).\] + \end{enumerate} + (Recall $\cF_n = \sigma(X_1,\ldots, X_n)$.) 
+\end{definition} +\begin{example} + Suppose $B \in \cB(\R)$ and $f = \One_B$. + Then the first equality of (ii) simplifies to + \[ + \bP_x[X_{n+1} \in B | \cF_n] = \bP_x[X_{n+1} \in B | \sigma(X_n)]. + \] +\end{example} + +\begin{definition}[Conditional probability] + \[ + \bP[A | \cG] \coloneqq \bE[\One_A | \cG]. + \] +\end{definition} + +\begin{example} + Let $\xi_i$ be i.i.d.~with $\bP[\xi_i = 1] = \bP[\xi_i = -1] = \frac{1}{2}$ + and define $X_n \coloneqq \sum_{i=1}^{n} \xi_i$. + + Intuitively, conditioned on $X_n$, $X_{n+1}$ should + be independent of $\sigma(X_1,\ldots, X_{n-1})$. + + For a set $B$, we have + \[ + \bP_0[X_{n+1} \in B| \sigma(X_1,\ldots, X_n)] + = \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] + = \bE[\One_{X_n + \xi_{n+1} \in B} | \sigma(X_n)]. + \] + + \begin{claim} + $\bE[\One_{X_{n+1} \in B} | \sigma(X_1,\ldots, X_n)] = \bE[\One_{X_{n+1} \in B} | \sigma(X_n)]$. + \end{claim} + \begin{subproof} + The rest of the lecture was very chaotic... + \end{subproof} +\end{example} + + +%TODO + + + + + + + +{ \huge\color{red} + New information after this point is not relevant for the exam. +} +Stopping times and optional stopping are very relevant for the exam, +the Markov property is not. 
diff --git a/jrpie-math.sty b/jrpie-math.sty index f3fc711..83b7e17 100644 --- a/jrpie-math.sty +++ b/jrpie-math.sty @@ -40,6 +40,10 @@ \RequirePackage{mkessler-faktor} \RequirePackage{mkessler-mathsymb} \RequirePackage[extended]{mkessler-mathalias} +% \makeatletter +% \expandafter\MakeAliasesForwith\expandafter\mathbf\expandafter{\expandafter bf\expandafter}\expandafter{\mkessler@mathalias@all} +% \makeatother + \RequirePackage{mkessler-refproof} % mkessler-mathfont has already been imported diff --git a/probability_theory.tex b/probability_theory.tex index fb2b4f9..cea6835 100644 --- a/probability_theory.tex +++ b/probability_theory.tex @@ -44,6 +44,7 @@ \input{inputs/lecture_18.tex} \input{inputs/lecture_19.tex} \input{inputs/lecture_20.tex} +\input{inputs/lecture_21.tex} \cleardoublepage