some changes
parent e5b22cd4a3
commit c45b643ea0
24 changed files with 122 additions and 83 deletions
@@ -1,2 +1,6 @@
 \section{Counterexamples}
 
+Exercise 4.3
+
+10.2
+
@@ -9,12 +9,13 @@ in the summer term 2023 at the University Münster.
 \end{warning}
 
 These notes contain errors almost surely.
-If you find some of them or want to improve something, please send me a message:
+If you find some of them or want to improve something,
+please send me a message:\\
 \texttt{notes\_probability\_theory@jrpie.de}.
 
 
 
-Topics of this lecture:
+\paragraph{Topics of this lecture}
 \begin{enumerate}[(1)]
 \item Limit theorems: Laws of large numbers and the central limit theorem for i.i.d.~sequences,
 \item Conditional expectation and conditional probabilities,
@@ -62,6 +62,7 @@ The converse to this fact is also true:
 \begin{proof}
 See theorem 2.4.3 in Stochastik.
 \end{proof}
+
 \begin{example}[Some important probability distribution functions]\hfill
 \begin{enumerate}[(1)]
 \item \vocab{Uniform distribution} on $[0,1]$:
@@ -1,4 +1,4 @@
-% lecture 10 - 2023-05-09
+\lecture{10}{2023-05-09}{}
 
 % RECAP
 
@@ -1,4 +1,5 @@
-\subsection{The central limit theorem}
+\lecture{11}{}{Intuition for the CLT}
+\subsection{The Central Limit Theorem}
 
 For $X_1, X_2,\ldots$ i.i.d.~we were looking
 at $S_n \coloneqq \sum_{i=1}^n X_i$.
@@ -1,4 +1,4 @@
-\lecture{12}{2023-05-16}{}
+\lecture{12}{2023-05-16}{Proof of the CLT}
 
 We now want to prove \autoref{clt}.
 The plan is to do the following:
@@ -47,6 +47,18 @@ in this lecture. However, they are quite important.
 
 We will now sketch the proof of \autoref{levycontinuity};
 details can be found in the notes.\notes
+\begin{definition}
+Let $(X_n)_n$ be a sequence of random variables.
+The distribution of $(X_n)_n$ is called
+\vocab[Distribution!tight]{tight} (dt. ``straff''),
+if
+\[
+\lim_{a \to \infty} \sup_{n \in \N} \bP[|X_n| > a] = 0.
+\]
+\end{definition}
+\begin{example}+[Exercise 8.1]
+\todo{Copy}
+\end{example}
 A generalized version of \autoref{levycontinuity} is the following:
 \begin{theorem}[A generalized version of Levy's continuity \autoref{levycontinuity}]
 \label{genlevycontinuity}
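For intuition, a companion example in the same notation (an editorial sketch, not part of the committed notes): any identically distributed sequence is tight, since the supremum over $n$ collapses to a single distribution,
\[
\sup_{n \in \N} \bP[|X_n| > a] = \bP[|X_1| > a] \xrightarrow{a \to \infty} 0
\]
by continuity of $\bP$ from above.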
@@ -55,14 +67,14 @@ A generalized version of \autoref{levycontinuity} is the following:
 for some function $\phi$ on $\R$.
 Then the following are equivalent:
 \begin{enumerate}[(a)]
-\item The distribution of $X_n$ is \vocab[Distribution!tight]{tight} (dt. ``straff''),
-i.e.~$\lim_{a \to \infty} \sup_{n \in \N} \bP[|X_n| > a] = 0$.
+\item The distribution of $X_n$ is tight.
 \item $X_n \xrightarrow{(d)} X$ for some real-valued random variable $X$.
 \item $\phi$ is the characteristic function of $X$.
 \item $\phi$ is continuous on all of $\R$.
 \item $\phi$ is continuous at $0$.
 \end{enumerate}
 \end{theorem}
+\todo{Proof of \autoref{genlevycontinuity} (Exercise 8.2)}
 \begin{example}
 Let $Z \sim \cN(0,1)$ and $X_n \coloneqq n Z$.
 We have $\phi_{X_n}(t) = \bE[e^{\i t X_n}] = e^{-\frac{1}{2} t^2 n^2} \xrightarrow{n \to \infty} \One_{\{t = 0\}}$.
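A short worked check (editorial sketch) that this family is indeed not tight: for every fixed $a > 0$,
\[
\bP[|X_n| > a] = \bP\left[|Z| > \tfrac{a}{n}\right] \xrightarrow{n \to \infty} \bP[|Z| > 0] = 1,
\]
so $\lim_{a \to \infty} \sup_{n \in \N} \bP[|X_n| > a] = 1 \neq 0$. Consistently with \autoref{genlevycontinuity}, the limit $\One_{\{t = 0\}}$ of the characteristic functions is not continuous at $0$, and $X_n$ does not converge in distribution.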
@@ -1,6 +1,6 @@
 \lecture{14}{2023-05-25}{Conditional expectation}
 
-\section{Conditional expectation}
+\section{Conditional Expectation}
 
 \subsection{Introduction}
 
@@ -87,7 +87,7 @@ We now want to generalize this to arbitrary random variables.
 \]
 \end{definition}
 
-\subsection{Existence of conditional probability}
+\subsection{Existence of Conditional Probability}
 
 We will give two different proofs of \autoref{conditionalexpectation}.
 The first one will use orthogonal projections.
@@ -1,9 +1,9 @@
 \lecture{15}{2023-06-06}{}
-\subsection{Properties of conditional expectation}
+\subsection{Properties of Conditional Expectation}
 
 We want to derive some properties of conditional expectation.
 
-\begin{theorem}[Law of total expectation] % Thm 1
+\begin{theorem}[Law of total expectation]
 \label{ceprop1}
 \label{totalexpectation}
 \[
@@ -50,7 +50,6 @@ We want to derive some properties of conditional expectation.
 
 \begin{theorem}[Positivity]
 \label{ceprop4}
-% 4
 \label{cpositivity}
 If $X \ge 0$, then $\bE[X | \cG] \ge 0$ a.s.
 \end{theorem}
@@ -66,12 +65,10 @@ We want to derive some properties of conditional expectation.
 \end{proof}
 \begin{theorem}[Conditional monotone convergence theorem]
 \label{ceprop5}
-% 5
 \label{mcmt}
 Let $X_n,X \in L^1(\Omega, \cF, \bP)$.
 Suppose $X_n \ge 0$ with $X_n \uparrow X$.
 Then $\bE[X_n|\cG] \uparrow \bE[X|\cG]$.
-
 \end{theorem}
 \begin{proof}
 Let $Z_n$ be a version of $\bE[X_n | Y]$.
@@ -187,12 +184,10 @@ Recall
 \]
 \end{theorem}
 \begin{proof}
-Similar to the proof of Hölder's inequality.
 \todo{Exercise}
 \end{proof}
 
 \begin{theorem}[Tower property]
-% 10
 \label{ceprop10}
 \label{cetower}
 Suppose $\cF \supset \cG \supset \cH$ are sub-$\sigma$-algebras.
@@ -202,11 +197,17 @@ Recall
 \]
 \end{theorem}
 \begin{proof}
-\todo{Exercise}
+By definition, $\bE[\bE[X | \cG] | \cH]$ is $\cH$-measurable.
+For any $H \in \cH$, we have
+\begin{IEEEeqnarray*}{rCl}
+\int_H \bE[\bE[X | \cG] | \cH] \dif \bP
+&=& \int_{H} \bE[X | \cG] \dif \bP\\ % defining property of $\bE[\cdot \mid \cH]$, since $H \in \cH$
+&=& \int_H X \dif \bP. % defining property of $\bE[\cdot \mid \cG]$, since $H \in \cH \subseteq \cG$
+\end{IEEEeqnarray*}
+Hence $\bE[\bE[X | \cG] | \cH] \overset{\text{a.s.}}{=} \bE[X | \cH]$.
 \end{proof}
 
 \begin{theorem}[Taking out what is known]
-% 11
 \label{ceprop11}
 \label{takingoutwhatisknown}
 
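A special case worth recording next to this proof (editorial sketch): with $\cH = \{\emptyset, \Omega\}$, conditioning on $\cH$ is plain expectation, so the tower property recovers the law of total expectation (\autoref{totalexpectation}),
\[
\bE[\bE[X \mid \cG]] = \bE[X].
\]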
@@ -32,7 +32,7 @@
 \end{refproof}
 
 
-\subsection{The Radon Nikodym theorem}
+\subsection{The Radon-Nikodym Theorem}
 
 First, let us recall some basic facts:
 \begin{fact}
@@ -1,6 +1,6 @@
 \lecture{17}{2023-06-15}{}
 
-\subsection{Doob's martingale convergence theorem}
+\subsection{Doob's Martingale Convergence Theorem}
 
 
 \begin{definition}[Stochastic process]
@@ -37,7 +37,7 @@
 Then $(Y_n)_{n \ge 1}$ is also a (sub/super-) martingale.
 \end{lemma}
 \begin{proof}
-Exercise. \todo{Copy}
+Exercise. \todo{Copy Exercise 10.4}
 \end{proof}
 \begin{remark}
 The assumption of $K_n$ being constant can be weakened to
@@ -13,7 +13,7 @@ Hence the same holds for submartingales, i.e.
 a.s.~to a limit, which is a.s.~finite.
 \end{lemma}
 
-\subsection{Doob's $L^p$ inequality}
+\subsection{Doob's $L^p$ Inequality}
 
 
 \begin{question}
@@ -1,6 +1,6 @@
 \lecture{19}{2023-06-22}{}
 
-\subsection{Uniform integrability}
+\subsection{Uniform Integrability}
 
 \begin{example}
 Let $\Omega = [0,1]$, $\cF = \cB$
@@ -198,7 +198,7 @@ However, some subsets can be easily described, e.g.
 \]
 \end{proof}
 
-\subsection{Martingale convergence theorems in $L^p, p \ge 1$}
+\subsection{Martingale Convergence Theorems in \texorpdfstring{$L^p, p \ge 1$}{Lp, p >= 1}}
 
 Let $(\Omega, \cF, \bP)$ be as always and let $(\cF_n)_n$ always be a filtration.
@@ -1,5 +1,5 @@
 \lecture{2}{}{}
-\section{Independence and product measures}
+\section{Independence and Product Measures}
 
 In order to define the notion of independence, we first need to construct
 product measures.
@@ -66,7 +66,7 @@
 
 Hence
 \[
 \|X_n - X\|_{L^p} %
 \le \|X_n - X_n'\|_{L^p} + \|X_n' - X'\|_{L^p} + \|X - X'\|_{L^p} %
 \le 3 \epsilon.
 \]
@@ -118,7 +118,7 @@ we need the following theorem, which we won't prove here:
 we get the convergence.
 \end{refproof}
 
-\subsection{Stopping times}
+\subsection{Stopping Times}
 
 \begin{definition}[Stopping time]
 A random variable $T: \Omega \to \N_0 \cup \{\infty\}$ on a filtered probability space $(\Omega, \cF, \{\cF_n\}_n, \bP)$ is called a \vocab{stopping time},
@@ -128,7 +128,6 @@ we need the following theorem, which we won't prove here:
 \]
 for all $n \in \N$.
 Equivalently, $\{T = n\} \in \cF_n$ for all $n \in \N$.
-
 \end{definition}
 
 \begin{example}
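The ``equivalently'' is one line in each direction (editorial sketch): using $\cF_{n-1} \subseteq \cF_n$,
\[
\{T = n\} = \{T \le n\} \setminus \{T \le n-1\} \in \cF_n,
\qquad
\{T \le n\} = \bigcup_{k=0}^{n} \{T = k\} \in \cF_n.
\]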
@@ -152,7 +151,6 @@ we need the following theorem, which we won't prove here:
 T \coloneqq \sup \{n \in \N : X_n \in A\}
 \]
 is not a stopping time.
-
 \end{example}
 
 
@@ -167,7 +165,7 @@ we need the following theorem, which we won't prove here:
 is a stopping time.
 \end{example}
 
-\begin{example}
+\begin{fact}
 If $T_1, T_2$ are stopping times with respect to the same filtration,
 then
 \begin{itemize}
@@ -176,37 +174,32 @@ we need the following theorem, which we won't prove here:
 \item $\max \{T_1, T_2\}$
 \end{itemize}
 are stopping times.
+\end{fact}
+\begin{warning}
 Note that $T_1 - T_2$ is not a stopping time.
-\end{example}
+\end{warning}
 
 \begin{remark}
-There are two ways to interpret the interaction between a stopping time $T$
-and a stochastic process $(X_n)_n$.
+There are two ways to look at the interaction between a stopping time $T$
+and a stochastic process $(X_n)_n$:
 \begin{itemize}
-\item The behaviour of $X_n$ until $T$,
-i.e.~looking at the \vocab{stopped process}
+\item The behaviour of $X_n$ until $T$, i.e.
 \[
 X^T \coloneqq \left(X_{T \wedge n}\right)_{n \in \N}
-\].
+\]
+is called the \vocab{stopped process}.
 \item The value of $(X_n)_n$ at time $T$,
 i.e.~looking at $X_T$.
 \end{itemize}
 \end{remark}
 \begin{example}
 If we look at a process
-\[
-S_n = \sum_{i=1}^{n} X_i
-\]
-for some $(X_n)_n$, then
-\[
-S^T = (\sum_{i=1}^{T \wedge n} X_i)_n
-\]
+\[ S_n = \sum_{i=1}^{n} X_i \]
+for some $(X_n)_n$,
+then
+\[ S^T = (\sum_{i=1}^{T \wedge n} X_i)_n \]
 and
-\[
-S_T = \sum_{i=1}^{T} X_i.
-\]
+\[ S_T = \sum_{i=1}^{T} X_i. \]
 \end{example}
 
 \begin{theorem}
@@ -242,7 +235,6 @@
 = 0 \text{ if $(X_n)_n$ is a martingale}.
 \end{cases}
 \end{IEEEeqnarray*}
-
 \end{proof}
 
 \begin{remark}
@@ -256,7 +248,6 @@
 = \bE[X_0] & \text{ martingale}.
 \end{cases}
 \]
-
 However, if $T$ is not bounded, this does not hold in general.
 \end{remark}
 \begin{example}
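For reference, the classic instance of this failure, sketched here since the notes' own example is cut off by the diff: let $(S_n)_n$ be simple random walk started at $S_0 = 0$ and $T \coloneqq \inf\{n \in \N : S_n = 1\}$. Then $T < \infty$ a.s., hence $S_T = 1$ a.s.~and
\[
\bE[S_T] = 1 \neq 0 = \bE[S_0],
\]
although $(S_n)_n$ is a martingale; here $T$ is a.s.~finite but not bounded.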
@@ -291,7 +282,7 @@
 $\bE[X_T] = \bE[X_0]$.
 \end{theorem}
 \begin{proof}
-(i) was dealt with in \autoref{roptionalstoppingi}.
+(i) was already done in \autoref{roptionalstoppingi}.
 
 (ii): Since $(X_n)_n$ is bounded, we get that
 \begin{IEEEeqnarray*}{rCl}
@@ -312,7 +303,6 @@
 \end{IEEEeqnarray*}
 Thus, we can apply (ii).
-
 
 The statement about martingales follows from
 applying this to $(X_n)_n$ and $(-X_n)_n$,
 which are both supermartingales.
@@ -1,4 +1,4 @@
-\lecture{22}{2023-07-04}{Intro Markov Chains II}
+\lecture{22}{2023-07-04}{Introduction to Markov Chains II}
 \begin{goal}
 We want to start with the basics of the theory of Markov chains.
 \end{goal}
@@ -1,5 +1,5 @@
 \lecture{5}{2023-04-21}{}
-\subsection{The laws of large numbers}
+\subsection{The Laws of Large Numbers}
 
 
 We want to show laws of large numbers:
@@ -1,3 +1,4 @@
+\lecture{6}{}{}
 \todo{Large parts of lecture 6 are missing}
 \begin{refproof}{lln}
 We want to deduce the SLLN (\autoref{lln}) from \autoref{thm2}.
@@ -1,9 +1,10 @@
-% TODO \begin{goal}
-% TODO We want to drop our assumptions on finite mean or variance
-% TODO and say something about the behaviour of $ \sum_{n \ge 1} X_n$
-% TODO when the $X_n$ are independent.
-% TODO \end{goal}
-\begin{theorem}[Theorem 3, Kolmogorov's three-series theorem] % Theorem 3
+\lecture{7}{}{Kolmogorov's three series theorem}
+\begin{goal}
+We want to drop our assumptions on finite mean or variance
+and say something about the behaviour of $\sum_{n \ge 1} X_n$
+when the $X_n$ are independent.
+\end{goal}
+\begin{theorem}[Kolmogorov's three-series theorem] % Theorem 3
 \label{thm3}
 Let $X_n$ be a family of independent random variables.
 \begin{enumerate}[(a)]
@@ -20,7 +21,7 @@
 \end{enumerate}
 \end{theorem}
 For the proof we'll need a slight generalization of \autoref{thm2}:
-\begin{theorem}[Theorem 4] % Theorem 4
+\begin{theorem} %[Theorem 4]
 \label{thm4}
 Let $\{X_n\}_n$ be independent and \vocab{uniformly bounded}
 (i.e. $\exists M < \infty : \sup_n \sup_\omega |X_n(\omega)| \le M$).
@@ -166,14 +167,13 @@ More formally:
 However
 \[
 \sum_{n} X_n \frac{1}{n^{\frac{1}{2} + \epsilon}}
 \]
 where $\bP[X_n = 1] = \bP[X_n = -1] = \frac{1}{2}$
 converges almost surely for all $\epsilon > 0$.
 And
 \[
 \sum_{n} X_n \frac{1}{n^{\frac{1}{2} - \epsilon}}
 \]
 does not converge.
-
 
 \end{example}
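A sketch of why the two series behave differently, via the variance criterion (\autoref{thm2}, or \autoref{thm4} since the summands are bounded): the terms $X_n n^{-\frac{1}{2}-\epsilon}$ are independent with mean $0$ and
\[
\sum_{n} \Var\left(\frac{X_n}{n^{\frac{1}{2}+\epsilon}}\right) = \sum_{n} \frac{1}{n^{1+2\epsilon}} < \infty,
\]
which gives a.s.~convergence; for the exponent $\frac{1}{2}-\epsilon$ the variances $n^{-(1-2\epsilon)}$ do not sum, so by the necessity part of \autoref{thm3} the series cannot converge a.s.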
@@ -24,7 +24,7 @@ of sequences of random variables.
 is again a $\sigma$-algebra, $\cT$ is indeed a $\sigma$-algebra.
 \item We have
 \[
-\cT = \{A \in \cF ~|~ \forall i ~ \exists B \in \cB(\R)^{\otimes \N} : A = \{\omega | (X_i(\omega), X_{i+1}(\omega), \ldots) \in B\} \}. % TODO?
+\cT = \{A \in \cF ~|~ \forall i ~ \exists B \in \cB(\R)^{\otimes \N} : A = \{\omega | (X_i(\omega), X_{i+1}(\omega), \ldots) \in B\} \}.
 \]
 \end{enumerate}
 \end{remark}
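A concrete event matching this description of $\cT$ (editorial sketch): $A \coloneqq \{\omega : \lim_n X_n(\omega) \text{ exists}\}$ is a tail event, since for every $i$
\[
A = \{\omega : (X_i(\omega), X_{i+1}(\omega), \ldots) \in B\}
\quad \text{with} \quad
B = \{x \in \R^{\N} : \lim_k x_k \text{ exists}\} \in \cB(\R)^{\otimes \N},
\]
because dropping finitely many coordinates does not affect convergence.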
@@ -146,5 +146,3 @@ for any $k \in \N$.
 \]
 hence $\bP[T] \in \{0,1\}$.
 \end{refproof}
-
-
@@ -1,6 +1,6 @@
+\lecture{9}{}{Percolation, Introduction to characteristic functions}
 \subsubsection{Application: Percolation}
 
-
 We will now discuss another application of Kolmogorov's $0-1$-law, percolation.
 
 \begin{definition}[\vocab{Percolation}]
@@ -41,7 +41,7 @@ For $d > 2$ this is unknown.
 We'll get back to percolation later.
 
 
-\section{Characteristic functions, weak convergence and the central limit theorem}
+\section{Characteristic Functions, Weak Convergence and the Central Limit Theorem}
 
 % Characteristic functions are also known as the \vocab{Fourier transform}.
 %Weak convergence is also known as \vocab{convergence in distribution} / \vocab{convergence in law}.
@@ -77,7 +77,7 @@ This will be the weakest notion of convergence, hence it is called
 \vocab{weak convergence}.
 This notion of convergence will be defined in terms of characteristic functions or Fourier transforms.
 
-\subsection{Characteristic functions and Fourier transform}
+\subsection{Characteristic Functions and Fourier Transform}
 
 \begin{definition}
 Consider $(\R, \cB(\R), \bP)$.
@@ -152,4 +152,3 @@ We will prove this later.
 $F(b) - F(a_n) = G(b) - G(a_n)$, hence $F(b) = G(b)$.
 Since $F$ and $G$ are right-continuous, it follows that $F = G$.
 \end{refproof}
-
@@ -1,7 +1,7 @@
-% This section provides a short recap of things that should be known
-% from the lecture on stochastics.
+This section provides a short recap of things that should be known
+from the lecture on stochastics.
 
-\subsection{Notions of convergence}
+\subsection{Notions of Convergence}
 \begin{definition}
 Fix a probability space $(\Omega,\cF,\bP)$.
 Let $X, X_1, X_2,\ldots$ be random variables.
@@ -147,7 +147,29 @@ The first thing that should come to mind is:
 
 We used Chebyshev's inequality, linearity of $\bE$, $\Var(cX) = c^2\Var(X)$, and $\Var(X_1 +\ldots + X_n) = \Var(X_1) + \ldots + \Var(X_n)$ for independent $X_i$.
 
 
-Modes of convergence: $L^p$, in probability, a.s.
 \fi
 
+\subsection{Some Facts from Measure Theory}
+\begin{fact}+[Finite measures are {\vocab[Measure]{regular}}, Exercise 3.1]
+Let $\mu$ be a finite measure on $(\R, \cB(\R))$.
+Then for all $\epsilon > 0$,
+there exists a compact set $K \in \cB(\R)$ such that
+$\mu(K) > \mu(\R) - \epsilon$.
+\end{fact}
+\begin{proof}
+We have $[-k,k] \uparrow \R$, hence $\mu([-k,k]) \uparrow \mu(\R)$; thus $\mu([-k,k]) > \mu(\R) - \epsilon$ for $k$ large enough, and $K \coloneqq [-k,k]$ is compact.
+\end{proof}
+
+\begin{theorem}[Riemann-Lebesgue]
+\label{riemann-lebesgue}
+Let $f: \R \to \R$ be integrable.
+Then
+\[
+\lim_{n \to \infty} \int_{\R} f(x) \cos(n x) \lambda(\dif x) = 0.
+\]
+\end{theorem}
+
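A possible proof sketch for \autoref{riemann-lebesgue} (editorial; the notes may argue differently): for an interval $[a,b]$,
\[
\int_a^b \cos(nx) \dif x = \frac{\sin(nb) - \sin(na)}{n} \xrightarrow{n \to \infty} 0,
\]
so the claim holds for indicators of intervals and, by linearity, for step functions; for integrable $f$ choose a step function $g$ with $\|f - g\|_{L^1} < \epsilon$ and use $\left|\int f(x) \cos(nx) \lambda(\dif x)\right| \le \epsilon + \left|\int g(x) \cos(nx) \lambda(\dif x)\right|$.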
@@ -1,4 +1,4 @@
-\documentclass[10pt,a4paper, fancyfoot, git, english]{mkessler-script}
+\documentclass[fancyfoot, git, english]{mkessler-script}
 
 \course{Probability Theory}
 \lecturer{Prof.~Chiranjib Mukherjee}
@@ -50,8 +50,10 @@
 
 \cleardoublepage
 
-%\backmatter
-%\chapter{Appendix}
+\begin{landscape}
+\section{Appendix}
+\input{inputs/a_0_distributions.tex}
+\end{landscape}
 
 \cleardoublepage
 \printvocabindex
wtheo.sty (13 changes)
@@ -11,6 +11,7 @@
 \usepackage[normalem]{ulem}
 \usepackage{pdflscape}
 \usepackage{longtable}
+\usepackage{colortbl}
 \usepackage{xcolor}
 \usepackage{dsfont}
 \usepackage{csquotes}
@@ -98,9 +99,15 @@
 \NewFancyTheorem[thmtools = { style = thmredmargin} , group = { big } ]{warning}
 \DeclareSimpleMathOperator{Var}
 
-\DeclareSimpleMathOperator{Bin}
-\DeclareSimpleMathOperator{Ber}
-\DeclareSimpleMathOperator{Exp}
+\DeclareSimpleMathOperator{Bin} % binomial distribution
+\DeclareSimpleMathOperator{Geo} % geometric distribution
+\DeclareSimpleMathOperator{Poi} % Poisson distribution
+\DeclareSimpleMathOperator{Unif} % uniform distribution
+\DeclareSimpleMathOperator{Exp} % exponential distribution
+\DeclareSimpleMathOperator{Cauchy} % Cauchy distribution
+% \DeclareSimpleMathOperator{Normal} % normal distribution
 
 
 \newcommand*\dif{\mathop{}\!\mathrm{d}}
 \newcommand\lecture[3]{\hrule{\color{darkgray}\hfill{\tiny[Lecture #1, #2]}}}
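A hypothetical usage sketch for the newly declared operators, assuming \DeclareSimpleMathOperator{Unif} produces an upright \Unif via \operatorname (an assumption based on the existing uses of \Var and \Bin, not confirmed by the commit):
\[
X \sim \Unif([0,1]), \qquad N \sim \Poi(\lambda), \qquad W \sim \Exp(\lambda).
\]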