removed whitespace

This commit is contained in:
Josia Pietsch 2023-07-06 00:36:26 +02:00
parent 023e865eb4
commit 2139f9d465
Signed by untrusted user who does not match committer: josia
GPG key ID: E70B571D66986A2D
14 changed files with 83 additions and 84 deletions

View file

@ -3,4 +3,3 @@
Exercise 4.3
10.2

View file

@ -21,7 +21,7 @@ where $\mu = \bP X^{-1}$.
Note that the term on the RHS is integrable, as
\[
\lim_{t \to 0} \frac{e^{-\i t b} - e^{-\i t a}}{- \i t} \phi(t) = b - a
\]
and note that $\phi(0) = 1$ and $|\phi(t)| \le 1$.
% TODO think about this
@ -45,7 +45,7 @@ where $\mu = \bP X^{-1}$.
\label{fact:intsinxx}
\[
\int_0^\infty \frac{\sin x}{x} dx = \frac{\pi}{2}
\]
where the LHS is an improper Riemann-integral.
Note that the LHS is not Lebesgue-integrable.
It follows that
@ -65,7 +65,7 @@ where $\mu = \bP X^{-1}$.
Then $\bP$ has a continuous probability density given by
\[
f(x) = \frac{1}{2 \pi} \int_{\R} e^{-\i t x} \phi_{\R}(t) dt.
\]
\end{theorem}
\begin{example}
@ -74,12 +74,12 @@ where $\mu = \bP X^{-1}$.
Then
\[
\phi_{\R}(t) = \int e^{\i t x} d \delta_0(x) = e^{\i t 0} = 1
\]
\item Let $\bP = \frac{1}{2} \delta_1 + \frac{1}{2} \delta_{-1}$.
Then
\[
\phi_{\R}(t) = \frac{1}{2} e^{\i t} + \frac{1}{2} e^{- \i t} = \cos(t)
\]
\end{itemize}
\end{example}
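% Added illustration (not part of the original notes): a sanity check of the
% inversion formula against the standard normal distribution.
As an illustration of the inversion formula above, one can check it on the standard
normal distribution, whose characteristic function is $\phi_{\R}(t) = e^{-\frac{t^2}{2}}$:
\[
\frac{1}{2\pi} \int_{\R} e^{-\i t x} e^{-\frac{t^2}{2}} dt
= \frac{1}{2\pi} \sqrt{2\pi}\, e^{-\frac{x^2}{2}}
= \frac{1}{\sqrt{2\pi}} e^{-\frac{x^2}{2}},
\]
which is indeed the density of $\cN(0,1)$.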
\begin{refproof}{thm:lec10_3}
@ -93,16 +93,16 @@ where $\mu = \bP X^{-1}$.
Then
\[
|e^{-\i t x} \phi(t)| \le |\phi(t)|
\]
and $\phi \in L^1$, hence $f(x_n) \to f(x)$
by the dominated convergence theorem.
\end{subproof}
We'll show that for all $a < b$ we have
\[
\bP\left( (a,b] \right) = \int_a^b f(x) dx.\label{thm10_3eq1}
\]
Let $F$ be the distribution function of $\bP$.
It is enough to prove \autoref{thm10_3eq1}
for all continuity points $a$ and $b$ of $F$.
@ -123,8 +123,8 @@ However, Fourier analysis is not only useful for continuous probability density
Then
\[
\forall x \in \R ~ \bP\left( \{x\} \right) = \lim_{T \to \infty} \frac{1}{2 T} \int_{-T}^T e^{-\i t x} \phi(t) dt.
\]
\end{theorem}
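% Added illustration (not part of the original notes): checking the formula on
% the two-point example from above.
For instance, for $\bP = \frac{1}{2} \delta_1 + \frac{1}{2} \delta_{-1}$ with $\phi(t) = \cos(t)$
and $x = 1$, one computes
\[
\frac{1}{2T} \int_{-T}^{T} e^{-\i t} \cos(t) dt
= \frac{1}{2T}\left( T + \frac{\sin(2T)}{2} \right)
\xrightarrow{T \to \infty} \frac{1}{2} = \bP\left( \{1\} \right),
\]
as the theorem predicts.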
\begin{refproof}{bochnersformula}
We have
@ -140,10 +140,10 @@ However, Fourier analysis is not only useful for continuous probability density
1, &y = x,\\
0, &y \neq x.
\end{cases}
\]
Hence
\begin{IEEEeqnarray*}{rCl}
\lim_{T \to \infty} \frac{1}{2T}\int_{\R} \frac{2 \sin(T (y-x))}{y-x} d \bP(y) &=& \bP\left( \{x\}\right)
\end{IEEEeqnarray*}
% TODO by dominated convergence?
\end{refproof}
@ -177,7 +177,7 @@ However, Fourier analysis is not only useful for continuous probability density
\begin{theorem}[Bochner's theorem]\label{bochnersthm}
The converse to \autoref{thm:lec_10thm5} holds, i.e.~any
$\phi: \R \to \C$ satisfying (a) and (b) of \autoref{thm:lec_10thm5}
must be the Fourier transform of a probability measure $\bP$
on $(\R, \cB(\R))$.
\end{theorem}
Unfortunately, we won't prove \autoref{bochnersthm} in this lecture.

View file

@ -21,7 +21,7 @@ For intuition, watch \url{https://3blue1brown.com/lessons/clt}.
\begin{example}
We throw a fair die $n = 100$ times and denote the sum of the faces
by $S_n \coloneqq X_1 + \ldots + X_n$, where $X_1,\ldots, X_n$
are i.i.d.~and uniformly distributed on $\{1,\ldots,6\}$.
Then $\bE[S_n] = 350$ and $\sqrt{\Var(S_n)} = \sigma \approx 17.07$.
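% Added computation (not part of the original notes): where these numbers come from.
Indeed, $\bE[X_1] = \frac{1}{6}(1 + \ldots + 6) = \frac{7}{2}$ and
$\Var(X_1) = \bE[X_1^2] - \bE[X_1]^2 = \frac{91}{6} - \frac{49}{4} = \frac{35}{12}$,
so by linearity and independence
\[
\bE[S_n] = 100 \cdot \frac{7}{2} = 350,
\qquad
\Var(S_n) = 100 \cdot \frac{35}{12} = \frac{875}{3},
\]
and $\sigma = \sqrt{875/3}$ is the value quoted above.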
\todo{Missing pictures}
@ -32,7 +32,7 @@ For intuition, watch \url{https://3blue1brown.com/lessons/clt}.
\end{question}
By definition, $\Var(X) = \bE[(X- \bE(X))^2]$, hence $\sqrt{\Var(X)}$
can be interpreted as a distance.
One could also measure the spread of $X$ by $\bE[|X - \bE(X)|]$, but this quantity is not
as well behaved as the variance (for instance, it is not additive for sums of independent random variables).
@ -95,7 +95,7 @@ If $S_n \sim \Bin(n,p)$ and $[a,b] \subseteq \R$, we have
With this in mind, a better approximation is
\[
\bP[S \le 25] = \bP[S \le 25.5] \approx \Phi\left( \frac{5.5}{\sqrt{10}} \right) \approx 0.959.
\]
\end{example}
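% Added remark (not part of the original notes): the general form of the continuity correction.
In general, for an integer-valued $S_n \sim \Bin(n,p)$ the continuity correction
replaces the event $\{S_n \le k\}$ by $\{S_n \le k + \frac{1}{2}\}$ before applying the
normal approximation, i.e.
\[
\bP[S_n \le k] \approx \Phi\left( \frac{k + \frac{1}{2} - np}{\sqrt{np(1-p)}} \right).
\]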
\begin{example}

View file

@ -23,7 +23,7 @@ if $X_1, X_2,\ldots$ are i.i.d.~with $ \mu = \bE[X_1]$,
Then the CLT holds, i.e.~
\[
\frac{\sum_{i=1}^n (X_i - \mu_i)}{S_n} \xrightarrow{(d)} \cN(0,1).
\]
\end{theorem}
\begin{theorem}[Lyapunov condition]
@ -34,7 +34,7 @@ if $X_1, X_2,\ldots$ are i.i.d.~with $ \mu = \bE[X_1]$,
Assume that, for some $\delta > 0$,
\[
\lim_{n \to \infty} \frac{1}{S_n^{2+\delta}} \sum_{i=1}^{n} \bE[|X_i - \mu_i|^{2 + \delta}] = 0
\]
(\vocab{Lyapunov condition}).
Then the CLT holds.
\end{theorem}
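% Added illustration (not part of the original notes): checking the Lyapunov
% condition in the simplest case, assuming the standard normalisation by $S_n^{2+\delta}$.
For example, if the $X_i$ are i.i.d.~with variance $\sigma^2 > 0$ and $\bE[|X_1 - \mu|^3] < \infty$,
then with $\delta = 1$ and $S_n^2 = n \sigma^2$,
\[
\frac{1}{S_n^{3}} \sum_{i=1}^{n} \bE[|X_i - \mu|^{3}]
= \frac{n \, \bE[|X_1 - \mu|^{3}]}{n^{3/2} \sigma^{3}}
\xrightarrow{n \to \infty} 0,
\]
so the Lyapunov condition holds and the CLT applies.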
@ -54,7 +54,7 @@ details can be found in the notes.\notes
if
\[
\lim_{a \to \infty} \sup_{n \in \N} \bP[|X_n| > a] = 0.
\]
\end{definition}
\begin{example}+[Exercise 8.1]
\todo{Copy}
@ -194,7 +194,7 @@ for all $f \in C_b$ and $x \to e^{\i t x}$ is continuous and bounded.
It suffices to show that
\[
\frac{A}{2} \left| \int_{-\frac{2}{A}}^{\frac{2}{A}} \phi_n(t) dt\right| - 1 \ge 1 - 2\epsilon
\]
or
\[
1 - \frac{A}{4} \left|\int_{-\frac{2}{A}}^{\frac{2}{A}} \phi_n(t) dt \right| \le \epsilon,

View file

@ -74,7 +74,7 @@ We now want to generalize this to arbitrary random variables.
then
\[
\bE[X | \cG] = (\omega \mapsto \bE[X])
\]
is a constant random variable.
\end{remark}
@ -128,13 +128,13 @@ and then do the harder proof.
which is a contradiction, since
\[
\bE[(Z - Z') \One_{Z - Z' > \frac{1}{n}}] \ge \frac{1}{n} \bP[Z - Z' > \frac{1}{n}] > 0.
\]
\bigskip
Existence of $\bE(X | \cG)$ for $X \in L^2$:
Let $H = L^2(\Omega, \cF, \bP)$
and $K = L^2(\Omega, \cG, \bP)$.
$K$ is closed, since a pointwise limit of $\cG$-measurable

View file

@ -25,7 +25,7 @@ We want to derive some properties of conditional expectation.
Then
\[
\int_A X \dif \bP \ge \frac{1}{n}\bP(A) + \int_A Y \dif \bP,
\]
contradicting property (b) from \autoref{conditionalexpectation}.
\end{proof}
@ -61,7 +61,7 @@ We want to derive some properties of conditional expectation.
However, it follows that
\[
\int_G \bE[X | \cG] \dif \bP \le -\frac{1}{n} \bP[G] < 0 \le \int_G X \dif \bP.
\]
\end{proof}
\begin{theorem}[Conditional monotone convergence theorem]
\label{ceprop5}
@ -77,11 +77,11 @@ We want to derive some properties of conditional expectation.
we have
\[
\bE[X_n | \cG] \overset{\text{a.s.}}{\ge} 0
\]
and
\[
\bE[X_n | \cG] \uparrow \text{a.s.}
\]
(consider $X_{n+1} - X_n$).
Define $Z \coloneqq \limsup_{n \to \infty} Z_n$.
@ -91,7 +91,7 @@ We want to derive some properties of conditional expectation.
Take some $G \in \cG$.
We know by (b) % TODO REF
that $\bE[Z_n \One_G] = \bE[X_n \One_G]$.
The LHS increases to $\bE[Z \One_G]$ by the monotone
convergence theorem.
Again by MCT, $\bE[X_n \One_G]$ increases to
$\bE[X \One_G]$.
@ -105,7 +105,7 @@ We want to derive some properties of conditional expectation.
Then
\[
\bE[ \liminf_{n \to \infty} X_n | \cG] \le \liminf_{n \to \infty} \bE[X_n | \cG].
\]
\end{theorem}
\begin{proof}
\notes
@ -117,7 +117,7 @@ We want to derive some properties of conditional expectation.
Suppose $|X_n(\omega)| < X(\omega)$ a.e.~
and $\int |X| \dif \bP < \infty$.
Then $X_n(\omega) \to X(\omega) \implies \bE[X_n | \cG] \to \bE[X | \cG]$.
\end{theorem}
\begin{proof}
\notes
@ -144,7 +144,7 @@ For conditional expectation, we have
such that
\[
c(x) = \sup_n(a_n x + b_n).
\]
\end{fact}
\begin{refproof}{cjensen}
By \autoref{convapprox}, $c(X) \ge a_n X + b_n$
@ -153,14 +153,14 @@ For conditional expectation, we have
\[
\bE[c(X) | \cG] \ge a_n \bE[X | \cG] + \bE[b_n | \cG]
= a_n \bE[X | \cG] + b_n \text{ a.s.}
\]
for all $n$.
Using that a countable union of sets of measure zero has measure zero,
we conclude that a.s.~this happens simultaneously for all $n$.
Hence
\[
\bE[c(X) | \cG] \ge \sup_n (a_n \bE[X | \cG] + b_n) \overset{\text{\autoref{convapprox}}}{=} c(\bE(X | \cG)).
\]
\end{refproof}
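% Added illustration (not part of the original notes): a typical application of
% conditional Jensen with the convex function $c(x) = |x|$.
For example, applying this to the convex function $c(x) = |x|$ yields
\[
\left| \bE[X | \cG] \right| \overset{\text{a.s.}}{\le} \bE[|X| \mid \cG],
\]
and taking expectations gives $\|\bE[X | \cG]\|_{L^1} \le \|X\|_{L^1}$.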
Recall
@ -170,7 +170,7 @@ Recall
Then
\[
\bE(X Y) \le \underbrace{\bE(|X|^p)^{\frac{1}{p}}}_{\text{\reflectbox{$\coloneqq$}} \|X\|_{L^p}} \bE(|Y|^q)^{\frac{1}{q}}.
\]
\end{fact}
\begin{theorem}[Conditional Hölder's inequality]
@ -181,7 +181,7 @@ Recall
Then
\[
\bE(X Y | \cG) \le \bE(|X|^p | \cG)^{\frac{1}{p}} \bE(|Y|^q | \cG)^{\frac{1}{q}}.
\]
\end{theorem}
\begin{proof}
\todo{Exercise}
@ -194,7 +194,7 @@ Recall
Then
\[
\bE\left[\bE[X | \cG] \mid \cH\right] \overset{\text{a.s.}}{=} \bE[X | \cH].
\]
\end{theorem}
\begin{proof}
By definition, $\bE[\bE[X | \cG] | \cH]$ is $\cH$-measurable.
@ -214,7 +214,7 @@ Recall
If $Y$ is $\cG$-measurable and bounded, then
\[
\bE[YX| \cG] \overset{\text{a.s.}}{=} Y \bE[X | \cG].
\]
\end{theorem}
\begin{proof}
Assume w.l.o.g.~$X \ge 0$.
@ -239,13 +239,13 @@ Assume $Y = \One_B$, then $Y$ simple, then take the limit (using that $Y$ is bou
then
\[
\bE[X | \sigma(\cG, \cH)] \overset{\text{a.s.}}{=} \bE[X | \cG].
\]
In particular, if $X$ is independent of $\cG$,
then
\[
\bE[X | \cG] \overset{\text{a.s.}}{=} \bE[X].
\]
\end{theorem}
\begin{example}[Martingale property of the simple random walk]

View file

@ -5,7 +5,7 @@
\begin{refproof}{ceroleofindependence}
Let $\cH$ be independent of $\sigma(\sigma(X), \cG)$.
Then for all $H \in \cH$, we have that $\One_H$
and any random variable measurable with respect to either $\sigma(X)$
or $\cG$ must be independent.
It suffices to consider the case of $X \ge 0$.
@ -55,7 +55,7 @@ The Radon Nikodym theorem is the converse of that:
Suppose
\[
\forall A \in \cF . ~ \mu(A) = 0 \implies \nu(A) = 0.
\]
Then
\begin{enumerate}[(1)]
\item there exists $Z: \Omega \to [0, \infty)$ measurable,
@ -70,7 +70,7 @@ The Radon Nikodym theorem is the converse of that:
\begin{definition}
Whenever the property $\forall A \in \cF, \mu(A) = 0 \implies \nu(A) = 0$
holds for two measures $\mu$ and $\nu$,
we say that $\nu$ is \vocab{absolutely continuous}
w.r.t.~$\mu$.
This is written as $\nu \ll \mu$.
\end{definition}
@ -81,7 +81,7 @@ of conditional expectation:
Let $(\Omega, \cF, \bP)$ as always, $X \in L^1(\bP)$ and $\cG \subseteq \cF$.
It suffices to consider the case of $X \ge 0$.
For all $G \in \cG$, define $\nu(G) \coloneqq \int_G X \dif \bP$.
Obviously, $\nu \ll \bP$ on $\cG$.
Then apply \autoref{radonnikodym}.
\end{proof}
@ -89,7 +89,7 @@ of conditional expectation:
\begin{refproof}{radonnikodym}
We will only sketch the proof.
A full proof can be found in the official notes.
\paragraph{Step 1: Uniqueness} \notes
\paragraph{Step 2: Reduction to the finite measure case}
\notes
@ -114,7 +114,7 @@ of conditional expectation:
\item For all $f \in \cC$, we have
\[
\int_\Omega f \dif \mu \le \nu(\Omega) < \infty.
\]
\end{enumerate}
Define $\alpha \coloneqq \sup \{ \int f \dif \mu : f \in \cC\} \le \nu(\Omega) < \infty$.
@ -142,7 +142,7 @@ of conditional expectation:
Then $\lambda(A) = 0$ since $\mu$ is finite.
Assume the claim does not hold.
Then there must be some $k \in \N$, $A \in \cF$
such that $\lambda(A) - \frac{1}{k} \mu(A) > 0$.
Fix this $A$ and $k$.
Then $A$ satisfies condition (i) of being good,
@ -187,8 +187,8 @@ Typically $\cF_n = \sigma(X_1, \ldots, X_n)$ for a sequence of random variables.
if it is adapted to $\cF_n$ but
\[
\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{\ge} X_n.
\]
It is called a \vocab{super-martingale}
if it is adapted but $\bE[X_{n+1} | \cF_n] \overset{\text{a.s.}}{\le} X_n$.
\end{definition}
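% Added illustration (not part of the original notes): a biased random walk,
% assuming i.i.d.~steps and the filtration $\cF_n = \sigma(\xi_1,\ldots,\xi_n)$.
For example, let $S_n = \xi_1 + \ldots + \xi_n$ with i.i.d.~steps
$\bP[\xi_i = 1] = p = 1 - \bP[\xi_i = -1]$ and $\cF_n = \sigma(\xi_1, \ldots, \xi_n)$.
Then
\[
\bE[S_{n+1} | \cF_n] = S_n + \bE[\xi_{n+1}] = S_n + (2p - 1),
\]
so $(S_n)_n$ is a submartingale for $p \ge \frac{1}{2}$, a supermartingale for $p \le \frac{1}{2}$,
and a martingale for $p = \frac{1}{2}$.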
\begin{corollary}

View file

@ -11,7 +11,7 @@
\begin{goal}
What about a ``gambling strategy''?
Consider a stochastic process $(X_n)_{n \in \N}$.
Note that the increments $X_{n+1} - X_n$ can be thought of as the win
@ -27,7 +27,7 @@
while
\[
Y_n \coloneqq \sum_{j=1}^n C_j(X_j - X_{j-1})
\]
defines the cumulative win process.
\end{goal}
\begin{lemma}
@ -70,11 +70,11 @@ It follows that the monotonic limit $U_\infty([a,b]) \coloneqq \lim_{N \to \inf
\label{lec17l1}
\[
\{\omega | \liminf_{N \to \infty} Z_N(\omega) < a < b <
\limsup_{N \to \infty} Z_N(\omega)\} \subseteq
\{\omega: U^{Z}_\infty([a,b])(\omega) = \infty\}
\]
for every sequence of measurable functions $(Z_n)_{n \ge 1}$.
\end{lemma}
\begin{lemma} % 2
\label{lec17l2}
@ -111,7 +111,7 @@ It follows that the monotonic limit $U_\infty([a,b]) \coloneqq \lim_{N \to \inf
by the monotone convergence theorem
\[
\bE[U_n([a,b])] \uparrow \bE[U_\infty([a,b])].
\]
\end{proof}
Assume now that our process $(X_n)_{n \ge 1}$ is a supermartingale
@ -119,7 +119,7 @@ bounded in $L^1(\bP)$.
Let
\[
\Lambda \coloneqq \{\omega | X_n(\omega) \text{ does not converge to anything in $[-\infty,\infty]$}\}.
\]
We have
\begin{IEEEeqnarray*}{rCl}
\Lambda &=& \{\omega | \liminf_N X_N(\omega) < \limsup_N X_N(\omega)\}\\
@ -160,7 +160,7 @@ The second part follows from
\end{claim}
\begin{subproof}
We need to show $\sup_n \bE(|X_n|) < \infty$.
Since the supermartingale is non-negative, we have $\bE[|X_n|] = \bE[X_n]$
and since it is a supermartingale $\bE[X_n] \le \bE[X_0]$.
\end{subproof}

View file

@ -3,7 +3,7 @@
Recall our key lemma \ref{lec17l3} for supermartingales from last time:
\[
(b-a) \bE[U_N([a,b])] \le \bE[(X_N - a)^-].
\]
What happens for submartingales?
If $(X_n)_{n \in \N}$ is a submartingale, then $(-X_n)_{n \in \N}$ is a supermartingale.
@ -40,12 +40,12 @@ Hence the same holds for submartingales, i.e.
By the SLLN, we have
\[
\frac{1}{n} \sum_{k=1}^{n} Z_k \xrightarrow{a.s.} \bE[Z_1] = 2p - 1.
\]
Hence
\[
\left(\frac{X_n}{x}\right)^{\frac{1}{n}} = u^{\frac{1}{n} \sum_{k=1}^n Z_k}
\xrightarrow{a.s.} u^{2p - 1}.
\]
Since $(X_n)_{n \ge 0}$ is a martingale, we must have $\bE[u^{Z_1}] = 1$.
Hence $2p - 1 < 0$, because $u > 1$.
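% Added derivation (not part of the original notes): why the martingale property
% pins down $u$; this assumes $\bP[Z_1 = 1] = p = 1 - \bP[Z_1 = -1]$.
Indeed, $\bE[u^{Z_1}] = p u + (1-p) u^{-1} = 1$ is equivalent to
\[
p u^2 - u + (1 - p) = 0 \iff (u - 1)\left(p u - (1-p)\right) = 0,
\]
so $u = 1$ or $u = \frac{1-p}{p}$; for $u > 1$ this forces $\frac{1-p}{p} > 1$, i.e.~$p < \frac{1}{2}$ and $2p - 1 < 0$.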
@ -67,7 +67,7 @@ consider $L^2$.
\begin{fact}[Martingale increments are orthogonal in $L^2$]
\label{martingaleincrementsorthogonal}
Let $(X_n)_n$ be a martingale
and let $Y_n \coloneqq X_n - X_{n-1}$
denote the \vocab{martingale increments}.
Then for all $m \neq n$ we have that
\[
@ -86,7 +86,7 @@ consider $L^2$.
Then
\[
2 \bE[X^2] + 2 \bE[Y^2] = \bE[(X+Y)^2] + \bE[(X-Y)^2].
\]
\end{fact}
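% Added one-line verification (not part of the original notes).
This follows by expanding the squares:
\[
\bE[(X+Y)^2] + \bE[(X-Y)^2]
= \bE[X^2 + 2XY + Y^2] + \bE[X^2 - 2XY + Y^2]
= 2\bE[X^2] + 2\bE[Y^2].
\]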
\begin{theorem}\label{martingaleconvergencel2}
@ -96,23 +96,23 @@ consider $L^2$.
Then there is a random variable $X_\infty$ such that
\[
X_n \xrightarrow{L^2} X_\infty.
\]
\end{theorem}
\begin{proof}
Let $Y_n \coloneqq X_n - X_{n-1}$ and write
\[
X_n = X_0 + \sum_{j=1}^{n} Y_j.
\]
We have
\[
\bE[X_n^2] = \bE[X_0^2] + \sum_{j=1}^{n} \bE[Y_j^2]
\]
by \autoref{martingaleincrementsorthogonal}
% (this is known as the \vocab{parallelogram identity}). % TODO how exactly is this used here?
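% Added sketch (not part of the original notes), addressing the TODO above:
% expand the square and use orthogonality of the increments.
Indeed, writing $X_n = X_0 + \sum_{j=1}^n Y_j$ and expanding,
\[
\bE[X_n^2] = \bE[X_0^2] + \sum_{j=1}^{n} \bE[Y_j^2]
+ 2 \sum_{j} \bE[X_0 Y_j] + 2 \sum_{i < j} \bE[Y_i Y_j],
\]
and all mixed terms vanish: $\bE[Y_i Y_j] = 0$ for $i \neq j$ by
\autoref{martingaleincrementsorthogonal}, and
$\bE[X_0 Y_j] = \bE[X_0 \bE[Y_j | \cF_{j-1}]] = 0$.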
In particular,
\[
\sup_n \bE[X_n^2] < \infty \iff \sum_{j=1}^{\infty} \bE[Y_j^2] < \infty.
\]
Since $(X_n)_n$ is bounded in $L^2$,
there exists $X_\infty$ such that $X_n \xrightarrow{\text{a.s.}} X_\infty$
@ -128,7 +128,7 @@ consider $L^2$.
limit
\[
\sum_{j \ge n + 1} \bE[Y_j^2] \xrightarrow{n\to \infty} 0
\]
we get $\bE[(X_\infty - X_n)^2] \xrightarrow{n\to \infty} 0$.
\end{proof}
@ -192,8 +192,8 @@ In order to prove \autoref{dooblp}, we first need
where
\[
E_j = \{|X_1| < \ell, |X_2| < \ell, \ldots, |X_{j-1}| < \ell, |X_j| \ge \ell\}.
\]
Then
\begin{equation}
\bP[E_j] \overset{\text{Markov}}{\le} \frac{1}{\ell} \int_{E_j} |X_j| \dif \bP
\label{lec18eq2star}
@ -211,7 +211,7 @@ In order to prove \autoref{dooblp}, we first need
\begin{equation}
\bE[\One_{E_j} (|X_n| - |X_j|)] \ge 0. \label{lec18eq3star}
\end{equation}
Now
\begin{IEEEeqnarray*}{rCl}
\bP(E) &=& \sum_{j=1}^n \bP(E_j)\\
@ -221,6 +221,6 @@ In order to prove \autoref{dooblp}, we first need
This proves the first part.
For the second part, we apply the first part and
\autoref{dooplplemma} (choose $Y \coloneqq X_n^\ast$).
\end{refproof}

View file

@ -179,7 +179,7 @@ However, some subsets can be easily described, e.g.
(1) $\implies$ (2)
$X_n \xrightarrow{L^1} X \implies X_n \xrightarrow{\bP} X$
by Markov's inequality.
Fix $\epsilon > 0$.
@ -195,7 +195,7 @@ However, some subsets can be easily described, e.g.
Then by \autoref{lec19f4} part (a) it follows that
\[
\int_{|X_n| > k} |X_n| \dif \bP \le \underbrace{\int |X - X_n| \dif \bP}_{< \epsilon} + \int_{|X_n| > k} |X| \dif \bP \le 2 \epsilon.
\]
\end{proof}
\subsection{Martingale Convergence Theorems in \texorpdfstring{$L^p, p \ge 1$}{Lp, p >= 1}}
@ -211,7 +211,7 @@ Let $(\Omega, \cF, \bP)$ as always and let $(\cF_n)_n$ always be a filtration.
It is clear that $(\bE[X | \cF_n])_n$ is adapted to $(\cF_n)_n$.
Let $X_n \coloneqq \bE[X | \cF_n]$.
Consider
\begin{IEEEeqnarray*}{rCl}
\bE[X_n - X_{n-1} | \cF_{n-1}]
&=& \bE[\bE[X | \cF_n] - \bE[X | \cF_{n-1}] | \cF_{n-1}]\\

View file

@ -108,7 +108,7 @@ is the unique solution to this problem.
\begin{subproof}
\todo{TODO}
% We have $\sigma(\One_{X_{n+1} \in B}) \subseteq \sigma(X_{n}, \xi_{n+1})$.
% $\sigma(X_1,\ldots,X_{n-1})$
% is independent of $\sigma( \sigma(\One_{X_{n+1} \in B}), X_n)$.
% Hence the claim follows from \autoref{ceroleofindependence}.
\end{subproof}

View file

@ -49,7 +49,7 @@ For the proof of (b) we need the following general result:
\end{theorem}
\begin{proof}
\end{proof}
\begin{question}
@ -60,7 +60,7 @@ This does not hold. Consider for example $X_n = \frac{1}{n^2} \delta_n + \frac{1
\begin{refproof}{lln}
\begin{enumerate}
\item[(b)]
\end{enumerate}
\end{refproof}

View file

@ -21,7 +21,7 @@
Then $a_1 + \ldots + a_n = (S_1 - S_0) + 2(S_2 - S_1) + 3(S_3 - S_2) +
\ldots + n (S_n - S_{n-1})$.
Thus $a_1 + \ldots + a_n = n S_n - (S_1 + S_2 + \ldots + S_{n-1})$.
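% Added reasoning sketch (not part of the original notes), assuming that $S_n$
% converges; this is the usual Kronecker-lemma argument.
Dividing by $n$ gives
\[
\frac{a_1 + \ldots + a_n}{n} = S_n - \frac{n-1}{n} \cdot \frac{S_1 + \ldots + S_{n-1}}{n-1},
\]
and if $S_n$ converges, the Ces\`aro averages on the right converge to the same limit,
so the left-hand side tends to $0$.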
\end{subproof}
The SLLN follows from the claim.
\end{refproof}
@ -50,12 +50,12 @@ We need the fol]
(By the independence of $X_1,\ldots, X_n$, the factors $D$ and $E$ are independent, and since $\bE(X_{i+1}) = \ldots = \bE(X_n) = 0$, we have $\int D E \, d\bP = 0$.)
% TODO
\end{proof}
\begin{refproof}{thm2}
% TODO
\end{refproof}
@ -67,7 +67,7 @@ We need the fol]
Let $S_n \coloneqq \sum_{i=1}^n X_i$.
For all $t > 0$ let \[
N_t \coloneqq \sup \{n : S_n \le t\}.
\]
Then $\frac{N_t}{t} \xrightarrow{a.s.} \frac{1}{m}$ as $t \to \infty$.
\end{theorem}
@ -92,7 +92,7 @@ We need the fol]
By definition, we have $S_{N_t} \le t \le S_{N_t + 1}$.
Then $\frac{S_{N_t}}{N_t} \le \frac{t}{N_t} \le \frac{S_{N_t + 1}}{N_t} \le \frac{S_{N_t + 1}}{N_t + 1} \cdot \frac{N_t + 1}{N_t}$.
Hence $\frac{t}{N_t} \to m$, i.e.~$\frac{N_t}{t} \to \frac{1}{m}$.
\end{proof}

View file

@ -98,7 +98,7 @@ from the lecture on stochastic.
\end{claim}
\begin{subproof}
We can use the same counterexample as in c).
$\bP[\lim_{n \to \infty} X_n = 0] \ge \bP[X_n = 0] = 1 - \frac{1}{n} \to 1$.
We have already seen that $X_n$ does not converge in $L^1$.
\end{subproof}
@ -166,7 +166,7 @@ We used Chebyshev's inequality. Linearity of $\bE$, $\Var(cX) = c^2\Var(X)$ and
Then
\[
\lim_{n \to \infty} \int_{\R} f(x) \cos(n x) \lambda(\dif x) = 0.
\]
\end{theorem}
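% Added illustration (not part of the original notes): a direct check for an indicator function.
For example, for $f = \One_{[0,1]}$ one has
\[
\int_{\R} f(x) \cos(n x) \lambda(\dif x) = \int_0^1 \cos(n x) \dif x = \frac{\sin(n)}{n} \xrightarrow{n \to \infty} 0.
\]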