% lecture 10 - 2023-05-09
% RECAP
First, we will prove some of the most important facts about Fourier transforms.
We consider $(\R, \cB(\R))$.
\begin{notation}
By $M_1 (\R)$ we denote the set of all probability measures on $\left( \R, \cB(\R) \right)$.
\end{notation}
For all $\bP \in M_1(\R)$ we define $\phi_{\bP}(t) = \int_{\R} e^{\i t x}d\bP(x)$.
If $X: (\Omega, \cF) \to (\R, \cB(\R))$ is a random variable, we write
$\phi_X(t) \coloneqq \bE[e^{\i t X}] = \phi_{\mu}(t)$,
where $\mu = \bP X^{-1}$.
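As a quick illustration of these definitions: for $X$ uniformly distributed on $[-1,1]$ one gets
\[
\phi_X(t) = \frac{1}{2} \int_{-1}^1 e^{\i t x} dx = \frac{\sin(t)}{t}
\]
for $t \neq 0$, and $\phi_X(0) = 1$.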
\begin{refproof}{inversionformula}
We will prove that the limit in the RHS of \autoref{invf}
exists and is equal to the LHS.
Note that the integrand on the RHS is integrable over $[-T,T]$: it is continuous away from $t = 0$, satisfies $|\phi(t)| \le 1$, and
\[
\lim_{t \to 0} \frac{e^{-\i t b} - e^{-\i t a}}{- \i t} \phi(t) = b - a
\]
(using $\phi(0) = 1$), so it is bounded.
% TODO think about this
We have
\begin{IEEEeqnarray*}{rCl}
&&\lim_{T \to \infty} \frac{1}{2 \pi} \int_{-T}^T \int_{\R} \frac{e^{-\i t b}- e^{-\i t a}}{-\i t} e^{\i t x} d \bP(x) dt\\
&\overset{\text{Fubini for $L^1$}}{=}& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \int_{-T}^T \frac{e^{-\i t b}- e^{-\i t a}}{-\i t} e^{\i t x} dt d \bP(x)\\
&=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \int_{-T}^T \frac{e^{\i t (x-b)}- e^{\i t (x-a)}}{-\i t} dt d \bP(x)\\
&=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \underbrace{\int_{-T}^T \frac{\cos(t (x-b)) - \cos(t(x-a))}{-\i t} dt}_{=0 \text{, as the integrand is odd in } t} d \bP(x)
\\&&
+ \lim_{T \to \infty} \frac{1}{2\pi} \int_{\R}\int_{-T}^T \frac{\sin(t ( x - b)) - \sin(t(x-a))}{-t} dt d\bP(x)\\
&=& \lim_{T \to \infty} \frac{1}{\pi} \int_\R \int_{0}^T \frac{\sin(t(x-a)) - \sin(t(x-b))}{t} dt d\bP(x)\\
&\overset{\substack{\text{\autoref{fact:intsinxx},}\\\text{dominated convergence}}}{=}& \frac{1}{\pi} \int_\R \left( -\frac{\pi}{2} \One_{x < a} + \frac{\pi}{2} \One_{x > a }
- \left( - \frac{\pi}{2} \One_{x < b} + \frac{\pi}{2} \One_{x > b}\right) \right) d\bP(x)\\
&=& \frac{1}{2} \bP(\{a\} ) + \frac{1}{2} \bP(\{b\}) + \bP((a,b))\\
&=& \frac{F(b) + F(b-)}{2} - \frac{F(a) + F(a-)}{2}
\end{IEEEeqnarray*}
\end{refproof}
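As a sanity check, take $\bP = \delta_0$, so that $\phi \equiv 1$, and continuity points $a < 0 < b$ of $F = \One_{[0,\infty)}$.
Then
\[
\frac{1}{2\pi} \int_{-T}^T \frac{e^{-\i t b} - e^{-\i t a}}{-\i t} dt
= \frac{1}{2 \pi} \int_a^b \int_{-T}^T e^{-\i t x} dt dx
= \frac{1}{\pi} \int_a^b \frac{\sin(T x)}{x} dx
\xrightarrow{T \to \infty} 1 = F(b) - F(a),
\]
where the last step uses \autoref{fact:intsinxx} (stated below).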
\begin{fact}
\label{fact:intsinxx}
\[
\int_0^\infty \frac{\sin x}{x} dx = \frac{\pi}{2}
\]
where the LHS is an improper Riemann integral;
note that $\frac{\sin x}{x}$ is not Lebesgue-integrable on $(0,\infty)$.
It follows that
\begin{IEEEeqnarray*}{rCl}
\lim_{T \to \infty} \int_0^T \frac{\sin(t(x-a))}{t} dt &=&
\begin{cases}
- \frac{\pi}{2}, &x < a,\\
0, &x = a,\\
\frac{\pi}{2}, & x > a.
\end{cases}
\end{IEEEeqnarray*}
\end{fact}
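The case distinction follows from the substitution $u = t(x-a)$:
for $x > a$,
\[
\int_0^T \frac{\sin(t(x-a))}{t} dt = \int_0^{T(x-a)} \frac{\sin u}{u} du \xrightarrow{T \to \infty} \frac{\pi}{2};
\]
for $x < a$ the same computation (using that $\sin$ is odd) gives $-\frac{\pi}{2}$, and for $x = a$ the integrand vanishes.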
\begin{theorem} % Theorem 3
\label{thm:lec10_3}
Let $\bP \in M_1(\R)$ such that $\phi_{\bP} \in L^1(\lambda)$.
Then $\bP$ has a continuous probability density given by
\[
f(x) = \frac{1}{2 \pi} \int_{\R} e^{-\i t x} \phi_{\bP}(t) dt.
\]
\end{theorem}
\begin{example}
\begin{itemize}
\item Let $\bP = \delta_0$.
Then
\[
\phi_{\bP}(t) = \int e^{\i t x} d \delta_0(x) = e^{\i t \cdot 0 } = 1.
\]
\item Let $\bP = \frac{1}{2} \delta_1 + \frac{1}{2} \delta_{-1}$.
Then
\[
\phi_{\bP}(t) = \frac{1}{2} e^{\i t} + \frac{1}{2} e^{- \i t} = \cos(t)
\]
\end{itemize}
\end{example}
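Note that in both examples $\phi_{\bP} \notin L^1(\lambda)$, which is consistent with \autoref{thm:lec10_3}, as neither measure has a density.
For an example where \autoref{thm:lec10_3} does apply, take $\bP$ to be the standard normal distribution:
then $\phi_{\bP}(t) = e^{-\frac{t^2}{2}} \in L^1(\lambda)$ and
\[
\frac{1}{2\pi} \int_\R e^{-\i t x} e^{-\frac{t^2}{2}} dt = \frac{1}{\sqrt{2\pi}} e^{-\frac{x^2}{2}},
\]
which is indeed the Gaussian density.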
\begin{refproof}{thm:lec10_3}
Let $f(x) \coloneqq \frac{1}{2 \pi} \int_{\R} e^{ - \i t x} \phi(t) dt$.
\begin{claim}
If $x_n \to x$, then $f(x_n) \to f(x)$.
\end{claim}
\begin{subproof}
We have $e^{-\i t x_n} \phi(t) \xrightarrow{n \to \infty} e^{-\i t x } \phi(t)$ for all $t$.
Furthermore,
\[
|e^{-\i t x_n} \phi(t)| \le |\phi(t)|
\]
and $\phi \in L^1$, hence $f(x_n) \to f(x)$
by the dominated convergence theorem.
\end{subproof}
We'll show that for all $a < b$ we have
\[
\bP\left( (a,b] \right) = \int_a^b f(x) dx.\label{thm10_3eq1}
\]
Let $F$ be the distribution function of $\bP$.
It is enough to prove \autoref{thm10_3eq1}
for all continuity points $a $ and $ b$ of $F$.
We have
\begin{IEEEeqnarray*}{rCl}
RHS &\overset{\text{Fubini}}{=}& \frac{1}{2 \pi} \int_{\R} \int_{a}^b e^{-\i t x} \phi(t) dx dt\\
&=& \frac{1}{2 \pi} \int_\R \phi(t) \int_a^b e^{-\i t x} dx dt\\
&=& \frac{1}{2\pi} \int_{\R} \phi(t) \left( \frac{e^{-\i t b} - e^{-\i t a}}{- \i t} \right) dt\\
&\overset{\text{dominated convergence}}{=}& \lim_{T \to \infty} \frac{1}{2\pi} \int_{-T}^{T} \phi(t) \left( \frac{e^{-\i t b} - e^{- \i t a}}{- \i t} \right) dt
\end{IEEEeqnarray*}
By \autoref{inversionformula}, this limit equals $\frac{F(b) + F(b-)}{2} - \frac{F(a) + F(a-)}{2}$, which is $F(b) - F(a) = \bP\left( (a,b] \right)$, since $a$ and $b$ are continuity points of $F$.
\end{refproof}
However, Fourier analysis is not only useful for continuous probability density functions:
\begin{theorem}[Bochner's formula for the mass at a point]\label{bochnersformula} % Theorem 4
Let $\bP \in M_1(\R)$ with characteristic function $\phi$.
Then
\[
\forall x \in \R ~ \bP\left( \{x\} \right) = \lim_{T \to \infty} \frac{1}{2 T} \int_{-T}^T e^{-\i t x } \phi(t) dt.
\]
\end{theorem}
\begin{refproof}{bochnersformula}
We have
\begin{IEEEeqnarray*}{rCl}
RHS &=& \lim_{T \to \infty} \frac{1}{2 T} \int_{-T}^T e^{-\i t x} \int_{\R} e^{\i t y} d \bP(y) \\
&\overset{\text{Fubini}}{=}& \lim_{T \to \infty} \frac{1}{2 T} \int_\R \bP(dy) \int_{-T}^T \underbrace{e^{-\i t (y - x)}}_{\cos(t ( y - x)) + \i \sin(t (y-x))} dt\\
&=& \lim_{T \to \infty} \frac{1}{2T} \int_{\R} d\bP(y) \int_{-T}^T \cos(t(y - x)) dt\\
&=& \lim_{T \to \infty} \int_{\R} \frac{\sin(T (y-x))}{T (y-x)} d \bP(y),
\end{IEEEeqnarray*}
where the integrand is to be read as $1$ for $y = x$,
since $\int_{-T}^T \cos(t(y-x)) dt = \frac{2 \sin(T(y-x))}{y-x}$ for $y \neq x$ and $= 2T$ for $y = x$.
Furthermore,
\[
\lim_{T \to \infty} \frac{\sin(T(y-x))}{T (y- x)} = \begin{cases}
1, &y = x,\\
0, &y \neq x.
\end{cases}
\]
Since $\left| \frac{\sin(T(y-x))}{T(y-x)} \right| \le 1$ and $\bP$ is a probability measure,
dominated convergence yields
\begin{IEEEeqnarray*}{rCl}
\lim_{T \to \infty} \int_{\R} \frac{\sin(T (y-x))}{T (y-x)} d \bP(y) &=& \bP\left( \{x\}\right).
\end{IEEEeqnarray*}
\end{refproof}
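As a quick check of \autoref{bochnersformula}, consider again $\bP = \frac{1}{2} \delta_1 + \frac{1}{2}\delta_{-1}$ with $\phi(t) = \cos(t)$.
For $x = 1$, the imaginary part of the integrand is odd, so
\[
\frac{1}{2T} \int_{-T}^T e^{-\i t} \cos(t) dt = \frac{1}{2T} \int_{-T}^T \cos(t)^2 dt = \frac{1}{2} + \frac{\sin(2T)}{4T} \xrightarrow{T \to \infty} \frac{1}{2} = \bP(\{1\}),
\]
whereas for $x \notin \{-1,1\}$ the analogous integral is of order $\frac{1}{T}$ and the limit is $0 = \bP(\{x\})$.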
\begin{theorem} % Theorem 5
\label{thm:lec_10thm5}
Let $\phi$ be the characteristic function of $\bP \in M_1(\R)$.
Then
\begin{enumerate}[(a)]
\item $\phi(0) = 1$, $|\phi(t)| \le 1$ and $\phi(\cdot )$ is continuous.
\item $\phi$ is a \vocab{positive definite function},
i.e.~
\[\forall t_1,\ldots, t_n \in \R, ~ c_1,\ldots,c_n \in \C ~ \sum_{j,k = 1}^n c_j \overline{c_k} \phi(t_j - t_k) \ge 0
\]
(equivalently, the matrix $(\phi(t_j- t_k))_{j,k}$ is positive semi-definite).
\end{enumerate}
\end{theorem}
\begin{refproof}{thm:lec_10thm5}
For part (a): $\phi(0) = \int_\R 1 \,d\bP = 1$ and $|\phi(t)| \le \int_\R |e^{\i t x}| d\bP(x) = 1$;
continuity of $\phi$ follows from dominated convergence, since $e^{\i t_n x} \to e^{\i t x}$ for $t_n \to t$ and $|e^{\i t_n x}| \le 1$.
For part (b) we have:
\begin{IEEEeqnarray*}{rCl}
\sum_{j,k} c_j \overline{c_k} \phi(t_j - t_k) &=& \sum_{j,k} c_j \overline{c_k} \int_\R e^{\i (t_j - t_k) x} d \bP(x)\\
&=& \int_{\R} \sum_{j,k} c_j \overline{c_k} e^{\i t_j x} \overline{e^{\i t_k x}} d\bP(x)\\
&=& \int_{\R}\sum_{j,k} c_j e^{\i t_j x} \overline{c_k e^{\i t_k x}} d\bP(x)\\
&=& \int_{\R} \left| \sum_{l} c_l e^{\i t_l x}\right|^2 d\bP(x) \ge 0
\end{IEEEeqnarray*}
\end{refproof}
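As a small worked instance of (b), take $n = 2$, $t_1 = s$ and $t_2 = 0$.
Since $\phi(-s) = \int_\R e^{-\i s x} d\bP(x) = \overline{\phi(s)}$, the matrix from the theorem is
\[
\begin{pmatrix}
\phi(0) & \phi(s)\\
\phi(-s) & \phi(0)
\end{pmatrix}
=
\begin{pmatrix}
1 & \phi(s)\\
\overline{\phi(s)} & 1
\end{pmatrix},
\]
and its positive semi-definiteness is equivalent to $1 - |\phi(s)|^2 \ge 0$, recovering the bound $|\phi(s)| \le 1$ from (a).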
\begin{theorem}[Bochner's theorem]\label{bochnersthm}
The converse to \autoref{thm:lec_10thm5} holds, i.e.~ any
$\phi: \R \to \C$ satisfying (a) and (b) of \autoref{thm:lec_10thm5}
must be the Fourier transform of a probability measure $\bP$
on $(\R, \cB(\R))$.
\end{theorem}
Unfortunately, we won't prove \autoref{bochnersthm} in this lecture.
\begin{definition}[Convergence in distribution / weak convergence]
We say that a sequence $(\bP_n)_n \subseteq M_1(\R)$ \vocab[Convergence!weak]{converges weakly} towards $\bP \in M_1(\R)$ (notation: $\bP_n \implies \bP$), iff
\[
\forall f \in C_b(\R)~ \int f d\bP_n \to \int f d\bP,
\]
where
\[
C_b(\R) \coloneqq \{ f: \R \to \R \text{ continuous and bounded}\}.
\]
In analysis, this is also known as $\text{weak}^\ast$ convergence.
\end{definition}
\begin{remark}
This notion of convergence makes $M_1(\R)$ a separable metric space; in fact, one can construct a metric on $M_1(\R)$ that turns $M_1(\R)$ into a complete
and separable metric space:
Consider the sets
\[
\left\{\bP \in M_1(\R): \forall i=1,\ldots,n ~ \left| \int f_i d \bP - \int f_i d\bP_0 \right| < \epsilon \right\}
\]
for any $\bP_0 \in M_1(\R)$, $f_1,\ldots, f_n \in C_b(\R)$ and $\epsilon > 0$.
These sets form a basis for the topology on $M_1(\R)$.
More of this will follow later.
\end{remark}
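One example of a metric inducing this topology is the Lévy metric
\[
d(\bP, \bQ) \coloneqq \inf\left\{ \epsilon > 0 : \forall x \in \R ~ F_{\bP}(x - \epsilon) - \epsilon \le F_{\bQ}(x) \le F_{\bP}(x + \epsilon) + \epsilon \right\},
\]
where $F_{\bP}$ and $F_{\bQ}$ denote the distribution functions of $\bP$ and $\bQ$.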
\begin{example}
\begin{itemize}
\item Let $\bP_n = \delta_{\frac{1}{n}}$.
Then $\int f d \bP_n = f(\frac{1}{n}) \to f(0) = \int f d \delta_0$
for any continuous, bounded function $f$.
Hence $\bP_n \implies \delta_0$.
\item $\bP_n \coloneqq \delta_n$ does not converge weakly,
as for example
\[
\int \cos(\pi x) d\bP_n(x) = \cos(\pi n) = (-1)^n
\]
does not converge.
\item $\bP_n \coloneqq \frac{1}{n} \delta_n + (1- \frac{1}{n}) \delta_0$.
Let $f \in C_b(\R)$ arbitrary.
Then
\[
\int f d\bP_n = \frac{1}{n} f(n) + (1 - \frac{1}{n}) f(0) \to f(0)
\]
since $f$ is bounded.
Hence $\bP_n \implies \delta_0$.
\item Let $\bP_n$ be the measure with Lebesgue density $x \mapsto \frac{1}{\sqrt{2 \pi n}} e^{-\frac{x^2}{2n}}$,
i.e.~the normal distribution with mean $0$ and variance $n$.
This ``converges'' towards the $0$-measure, which is not a probability measure. Hence $\bP_n$ does not converge weakly.
(Exercise; one possible approach is sketched after this example.)
\end{itemize}
\end{example}
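One possible way to see the last point: for $f(x) = e^{-x^2} \in C_b(\R)$,
\[
\int f d\bP_n = \int_\R e^{-x^2} \frac{1}{\sqrt{2 \pi n}} e^{-\frac{x^2}{2n}} dx = \frac{1}{\sqrt{2n+1}} \xrightarrow{n \to \infty} 0,
\]
but $\int f d\bP > 0$ for every $\bP \in M_1(\R)$, since $f$ is strictly positive; hence no weak limit can exist.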
\begin{definition}
We say that a sequence of random variables $X_n$
\vocab[Convergence!in distribution]{converges in distribution}
to $X$ (notation: $X_n \xrightarrow{\text{dist}} X$), iff
$\bP_n \implies \bP$, where $\bP_n$ is the distribution of $X_n$
and $\bP$ is the distribution of $X$.
\end{definition}
\begin{example}
Let $X_n \coloneqq \frac{1}{n}$
and $F_n$ the distribution function, i.e.~$F_n = \One_{[\frac{1}{n},\infty)}$.
Then $\bP_n = \delta_{\frac{1}{n}} \implies \delta_0$
which is the distribution of $X \equiv 0$.
But $F_n(0) = 0 \centernot\to 1 = F(0)$.
\end{example}
\begin{theorem}
$X_n \xrightarrow{\text{dist}} X$ iff
$F_n(t) \to F(t)$ for all continuity points $t$ of $F$,
where $F_n$ and $F$ denote the distribution functions of $X_n$ and $X$.
\end{theorem}
\begin{theorem}[Lévy's continuity theorem]\label{levycontinuity}
$X_n \xrightarrow{\text{dist}} X$ iff
$\phi_{X_n}(t) \to \phi_X(t)$ for all $t \in \R$.
\end{theorem}
We will assume these two theorems for now and derive the central limit theorem.
The theorems will be proved later.