some small changes

Josia Pietsch 2023-07-14 22:07:36 +02:00
parent 594d933beb
commit 6122151d4a
6 changed files with 124 additions and 24 deletions

View File

@ -114,8 +114,10 @@ The converse to this fact is also true:
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[samples=1000, width=10cm, height=5cm]
\addplot[] {and(x >= -1, x < 1) * 0.5 + (x >= 1)};
\begin{axis}[samples=1000, width=10cm, height=5cm, xmin=-2, xmax=2]
\addplot[ domain=-2.5:-1]{ 0 };
\addplot[ domain=-1:1] { 1 / 2 };
\addplot[ domain=1:2.5] { 1 };
\end{axis}
\end{tikzpicture}
\end{figure}

View File

@ -33,7 +33,7 @@ where $\mu = \bP X^{-1}$.
&=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \underbrace{\int_{-T}^T \left[ \frac{\cos(t (x-b)) - \cos(t(x-a))}{-\i t}\right] \dif t}_{=0 \text{, as the function is odd}} \bP(\dif x) \\
&& + \lim_{T \to \infty} \frac{1}{2\pi} \int_{\R}\int_{-T}^T \frac{\sin(t ( x - b)) - \sin(t(x-a))}{-t} \dif t \bP(\dif x)\\
&=& \lim_{T \to \infty} \frac{1}{\pi} \int_\R \int_{0}^T \frac{\sin(t(x-a)) - \sin(t(x-b))}{t} \dif t \bP(\dif x)\\
&\overset{\substack{\text{\autoref{fact:intsinxx},}\\\text{dominated convergence}}}{=}&
&\overset{\substack{\text{\autoref{fact:sincint},}\\\text{dominated convergence}}}{=}&
\frac{1}{\pi} \int -\frac{\pi}{2} \One_{x < a} + \frac{\pi}{2} \One_{x > a}
- (- \frac{\pi}{2} \One_{x < b} + \frac{\pi}{2} \One_{x > b}) \bP(\dif x)\\
&=& \frac{1}{2} \bP(\{a\} ) + \frac{1}{2} \bP(\{b\}) + \bP((a,b))\\
@ -42,7 +42,7 @@ where $\mu = \bP X^{-1}$.
\end{refproof}
\begin{fact}
\label{fact:intsinxx}
\label{fact:sincint}
\[
\int_0^\infty \frac{\sin x}{x} \dif x = \frac{\pi}{2}
\]
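In the proof above, this fact enters (together with dominated convergence) in the scaled form: for $c \in \R$, the substitution $u = tc$ gives
\[
\int_0^T \frac{\sin(tc)}{t} \dif t = \int_0^{Tc} \frac{\sin u}{u} \dif u \xrightarrow{T \to \infty} \frac{\pi}{2} \operatorname{sgn}(c)
\]
(with $\operatorname{sgn}(0) = 0$), which, applied to $c = x - a$ and $c = x - b$, yields the indicator expressions above.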
@ -259,6 +259,73 @@ for all $f \in C_b(\R)$.
$X_n \xrightarrow{\text{dist}} X$ iff
$F_n(t) \to F(t)$ for all continuity points $t$ of $F$.
\end{theorem}
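For instance, if $X_n = \frac{1}{n}$ deterministically and $X = 0$, then $X_n \xrightarrow{\text{dist}} X$, but $F_n(0) = 0$ for all $n$ while $F(0) = 1$; this is consistent with the theorem, as $0$ is not a continuity point of $F$.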
% \begin{proof}\footnote{This proof was not done in the lecture,
% but can be found in the official notes from lecture 13}
% ``$\implies$''
% Suppose $\mu_n \implies \mu$.
% Let $F_n$ and $F$ denote the respective distribution functions.
% Fix a continuity point $x_0 \in \R$ of $F$.
% We'll show
% \[
% \limsup_{n \to \infty} F_n(x_0) \le F(x_0) + \epsilon
% \]
% and
% \[
% \liminf_{n \to \infty} F_n(x_0) \ge F(x_0) - \epsilon
% \]
% for all $\epsilon > 0$.
% Fix some $\epsilon > 0$.
% Choose $\delta > 0$ such that $F(x_0 + \delta) < F(x_0) + \epsilon$
% and define
% \[
% g(x) \coloneqq \begin{cases}
% 1 &\text{if } x \le x_0,\\
% 1 - \frac{1}{\delta}(x - x_0)&
% \text{if } x \in (x_0, x_0 + \delta],\\
% 0 &\text{if } x \ge x_0 + \delta.
% \end{cases}
% \]
% Since $g$ is continuous and bounded, we have
% \[
% \int g \dif \mu_n \to \int g \dif \mu.
% \]
% It is clear that $\One_{(-\infty, x_0]} \le g$.
% Hence
% \[
% F_n(x_0) = \int \One_{(-\infty, x_0]} \dif \mu_n \le \int g \dif \mu_n.
% \]
% It follows that
% \begin{IEEEeqnarray*}{rCl}
% \limsup_{n} F_n(x_0)
% &\le& \limsup_n \int g \dif \mu_n\\
% &=& \lim_n \int g \dif \mu_n\\
% &=& \int g \dif \mu\\
% &\overset{g \le \One_{(-\infty, x_0 + \delta]}}{\le}& F(x_0 + \delta)\\
% &\le& F(x_0) + \epsilon.
% \end{IEEEeqnarray*}
% The assertion about $\liminf_{n \to \infty} F_n(x_0)$
% follows by a similar argument.
%
% ``$\impliedby$''
% Assume that $F_n(x) \to F(x)$ at all continuity points of $F$.
% We need to show
% \[
% \forall g \in C_b(\R) .~\int g \dif \mu_n \to \int g \dif \mu.
% \]
% Let $C$ denote the set of continuity points of $F$.
% We apply measure theoretic induction:
% \begin{itemize}
% \item For $g = \One_{(a,b]}$, $a< b \in C$,
% we have
% \[\int g \dif \mu_n = F_n(b) - F_n(a) \to F(b) - F(a) = \int g \dif \mu.\]
% \item For $g = \sum_{i} \alpha_i \One_{(a_i, b_i]}$,
% $a_i < b_i \in C$,
% we get $\int g \dif \mu_n \to \int g \dif \mu$
% by the same argument.
% \item % TODO continue from Lec13 page 21 (iii)
% \end{itemize}
%
% \end{proof}
\begin{theorem}[Lévy's continuity theorem]\label{levycontinuity}
% Theorem 2
$X_n \xrightarrow{\text{dist}} X$ iff

View File

@ -152,15 +152,39 @@ We will need the following:
Then $\mu\left( (-A,A) \right) \ge \frac{A}{2} \left| \int_{-\frac{2}{A}}^{\frac{2}{A}} \phi(t) d t \right| - 1$.
\end{lemma}
\begin{refproof}{s7e1}
Exercise.\todo{TODO}
We have
\begin{IEEEeqnarray*}{rCl}
\int_{-\frac{2}{A}}^{\frac{2}{A}} \phi(t) \dif t
&=& \int_{-\frac{2}{A}}^{\frac{2}{A}} \int_{\R} e^{\i t x} \mu(\dif x) \dif t\\
&=& \int_{\R} \int_{-\frac{2}{A}}^{\frac{2}{A}} e^{\i t x} \dif t \mu(\dif x)\\
&=& \int_{\R} \int_{-\frac{2}{A}}^{\frac{2}{A}} \cos(t x) \dif t \mu(\dif x)\\
&=& \int_{\R} \frac{2 \sin\left( \frac{2x}{A}\right) }{x} \mu(\dif x).\\
\end{IEEEeqnarray*}
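The interchange of the two integrals is justified by Fubini, as $|e^{\i t x}| = 1$ and both measures are finite; the imaginary part vanishes since $t \mapsto \sin(t x)$ is odd. For $x \neq 0$,
\[
\int_{-\frac{2}{A}}^{\frac{2}{A}} \cos(t x) \dif t = \left[ \frac{\sin(t x)}{x} \right]_{-\frac{2}{A}}^{\frac{2}{A}} = \frac{2 \sin\left( \frac{2x}{A} \right)}{x},
\]
and the integrand in the last line is understood as $\frac{4}{A}$ at $x = 0$ by continuity.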
Hence
\begin{IEEEeqnarray*}{rCl}
\frac{A}{2}\left|\int_{-\frac{2}{A}}^{\frac{2}{A}} \phi(t) \dif t\right|
&=& \left| A \int_{\R} \frac{\sin\left( \frac{2x}{A} \right) }{x} \mu(\dif x)\right|\\
&=& 2\left| \int_{\R} \sinc\left( \frac{2x}{A} \right) \mu(\dif x)\right|\\
&\le& 2 \left[ \int_{|x| < A} \underbrace{\left|\sinc\left( \frac{2x}{A} \right) \right|}_{\le 1} \mu(\dif x)
+ \int_{|x| \ge A} \left|\sinc\left( \frac{2x}{A} \right)\right| \mu(\dif x) \right]\\
&\le& 2 \left[ \mu\left( (-A,A) \right)
+ \frac{A}{2} \int_{|x| \ge A} \frac{|\sin(2x / A)|}{|x|} \mu(\dif x) \right]\\
&\le& 2 \left[ \mu\left( (-A,A) \right)
+ \frac{A}{2} \int_{|x| \ge A} \frac{1}{A} \mu(\dif x) \right]\\
&\le& 2 \mu((-A,A)) + \mu((-A,A)^c)\\
&=& 1 + \mu((-A,A)).
\end{IEEEeqnarray*}
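Rearranging gives exactly the claimed bound
\[
\mu\left( (-A,A) \right) \ge \frac{A}{2} \left| \int_{-\frac{2}{A}}^{\frac{2}{A}} \phi(t) \dif t \right| - 1.
\]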
\end{refproof}
\begin{refproof}{levycontinuity}
``$\implies$ '' If $\mu_n \implies \mu$, then
$\int f d \mu_n \to \int f d \mu$
for all $f \in C_b$ and $x \to e^{\i t x}$ is continuous and bounded.
``$\implies$ '' If $\mu_n \implies \mu$,
then by definition
$\int f \dif \mu_n \to \int f \dif \mu$
for all $f \in C_b$.
Since $x \mapsto e^{\i t x}$ has bounded and continuous real and imaginary parts,
it follows that $\phi_n(t) \to \phi(t)$
for all $t \in \R$.
``$ \impliedby$''
@ -237,8 +261,8 @@ such that $F_{n_k}(x) \to F(x)$ for all $x$ where $F$ is continuous.
Since $F$ is a probability distribution function, there exists
a probability measure $\nu$ on $\R$ such that $F$ is the distribution
function of $\nu$.
Since $F_{n_k}(x) \to F_n(x)$ at all continuity points $x$ of $F$.
By \autoref{lec10_thm1} we obtain that
Since $F_{n_k}(x) \to F(x)$ at all continuity points $x$ of $F$,
by \autoref{lec10_thm1} we obtain that
$\mu_{n_k} \overset{k \to \infty}{\implies} \nu$.
Hence
$\phi_{\mu_{n_k}}(t) \to \phi_\nu(t)$, by the other direction of that theorem.
@ -269,8 +293,6 @@ However $G_1, G_2, \ldots$ is not converging to $F$,
as this would fail at $x_0$. This is a contradiction.
\end{refproof}
% IID is over now
\subsection{Summary}
What did we learn:
@ -281,5 +303,3 @@ What did we learn:
\item Kolmogorov's three series theorem
\item Fourier transform, weak convergence and CLT
\end{itemize}

View File

@ -125,6 +125,7 @@ We want to derive some properties of conditional expectation.
Recall
\begin{fact}[Jensen's inequality]
\label{jensen}
If $c : \R \to \R$ is convex and $\bE[|c \circ X|] < \infty$,
then $\bE[c \circ X] \overset{\text{a.s.}}{\ge} c(\bE[X])$.
\end{fact}
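For example, for $X \in L^2(\bP)$ and $c(x) = x^2$ this gives $\bE[X^2] \ge \bE[X]^2$, i.e.~the variance $\bE[X^2] - \bE[X]^2$ is nonnegative; for $X \in L^1(\bP)$ and $c(x) = |x|$ it gives $\bE[|X|] \ge |\bE[X]|$.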

View File

@ -25,7 +25,8 @@
\begin{definition}
A sequence of random variables $(X_n)_n$ is called \vocab{uniformly integrable} (UI),
if
\[\forall \epsilon > 0 .~\exists k > 0 .~ \forall n.~\bE[|X_n| \One_{\{|X_n > k\} }] < \epsilon.\]
\[\forall \epsilon > 0 .~\exists K > 0 .~ \forall n.~
\bE[|X_n| \One_{\{|X_n| > K\} }] < \epsilon.\]
Similarly, we define uniform integrability for sets of random variables.
\end{definition}
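For example, a single random variable $X \in L^1(\bP)$ is uniformly integrable, since $\bE[|X| \One_{\{|X| > K\}}] \xrightarrow{K \to \infty} 0$ by dominated convergence. In contrast, on $[0,1]$ with Lebesgue measure the sequence $X_n \coloneqq n \One_{[0, \frac{1}{n}]}$ is bounded in $L^1$, but not uniformly integrable, as $\bE[|X_n| \One_{\{|X_n| > K\}}] = 1$ whenever $n > K$.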
@ -45,24 +46,32 @@ However, some subsets can be easily described, e.g.
Choose $q$ such that $\frac{1}{p} + \frac{1}{q} = 1$.
Then
\begin{IEEEeqnarray*}{rCl}
\bE[|X_n| \One_{|X_n| > k}] &\le& \bE[|X_n|^p]^{\frac{1}{p}} \bP[|X_n| > k]^{\frac{1}{q}}\\
\bE[|X_n| \One_{|X_n| > K}]
&\le& \bE[|X_n|^p]^{\frac{1}{p}} \bP[|X_n| > K]^{\frac{1}{q}},\\
\end{IEEEeqnarray*}
by Hölder's inequality. Taking the supremum over $n$ yields
\begin{IEEEeqnarray*}{rCl}
\sup_n\bE[|X_n| \One_{|X_n| > k}] &\le& \underbrace{\sup_n\bE[|X_n|^p]^{\frac{1}{p}}}_{< \infty} \sup_n \underbrace{\bP[|X_n| > k]^{\frac{1}{q}}}_{\le k^{\frac{1}{q}} \bE[|X_n|]^{\frac{1}{q}}}\\
\sup_n\bE[|X_n| \One_{|X_n| > K}]
&\le& \underbrace{\sup_n\bE[|X_n|^p]^{\frac{1}{p}}}_{< \infty}
\sup_n \underbrace{\bP[|X_n| > K]^{\frac{1}{q}}}_%
{\le K^{-\frac{1}{q}} \bE[|X_n|]^{\frac{1}{q}}}\\
\end{IEEEeqnarray*}
where we have applied Markov's inequality. % TODO REF
Since $\sup_n \bE[|X_n|^{1+\delta}] < \infty$,
we have that $\sup_n \bE[|X_n|] < \infty$ by Jensen (\autoref{cjensen}).
Hence, choose $k$ large enough to make the relevant
term less than $\epsilon$.
we have that $\sup_n \bE[|X_n|] < \infty$ by Jensen (\autoref{jensen}).
Hence, for $K$ large enough, the right-hand side is less than $\epsilon$.
\end{proof}
\begin{fact}\label{lec19f2}
If $(X_n)_n$ is uniformly integrable,
then $(X_n)_n$ is bounded in $L^1$.
\end{fact}
\begin{proof}
Take some $\epsilon > 0$ and $K$ such that
$\sup_n\bE[|X_n| \One_{|X_n| > K}] < \epsilon$.
Then $\sup_n\|X_n\|_{L^1} \le K + \epsilon$.
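Indeed, for every $n$,
\[
\bE[|X_n|] = \bE[|X_n| \One_{\{|X_n| \le K\}}] + \bE[|X_n| \One_{\{|X_n| > K\}}] \le K + \epsilon.
\]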
\end{proof}
\begin{fact}\label{lec19f3}
Suppose $Y \in L^1(\bP)$ and $\sup_n |X_n(\cdot )| \le Y(\cdot )$.
@ -125,7 +134,7 @@ However, some subsets can be easily described, e.g.
\label{lec19eqstar}
\end{equation}
Let $Y = \bE[X | \cG]$ for some sub-$\sigma$-algebra $\cG$.
Then, by \autoref{condjensen}, $|Y| \le \bE[ |X| | \cG]$.
Then, by \autoref{cjensen}, $|Y| \le \bE[ |X| | \cG]$.
Hence $\bE[|Y|] \le \bE[|X|]$.
It follows that $\bP[|Y| > k] < \delta$
for $k$ suitably large,
@ -191,7 +200,7 @@ However, some subsets can be easily described, e.g.
\end{IEEEeqnarray*}
for all $\delta > 0$ and suitable $k$.
Hence $\bP[|X_n| < k] < \delta$ by Markov's inequality.
Hence $\bP[|X_n| \ge k] < \delta$ by Markov's inequality.
Then by \autoref{lec19f4} part (a) it follows that
\[
\int_{|X_n| > k} |X_n| \dif \bP \le \underbrace{\int |X - X_n| \dif \bP}_{< \epsilon} + \int_{|X_n| > k} |X| \dif \bP \le 2 \epsilon.
@ -227,6 +236,7 @@ Let $(\Omega, \cF, \bP)$ as always and let $(\cF_n)_n$ always be a filtration.
Then $X_n \coloneqq \bE[X | \cF_n]$ defines a martingale which converges
to $X$ in $L^p$.
\end{theorem}
% \todo{Proof ?}
\begin{theorem}
\label{martingaleisce}

View File

@ -44,7 +44,7 @@
\begin{IEEEeqnarray*}{rCl}
\int | X - X'|^p \dif \bP &=& \int_{\{|X| > M\} } |X|^p \dif \bP \xrightarrow{M \to \infty} 0
\end{IEEEeqnarray*}
as $\bP$ is \vocab[Measure!regular]{regular}, \todo{Make this a definition?}
as $\bP$ is regular,
i.e.~$\forall \epsilon > 0 . ~\exists k . ~\bP[|X|^p \in [-k,k]] \ge 1-\epsilon$.
Take some $\epsilon > 0$ and $M$ large enough such that