From 80664eba78bdb06726c93cba91a34f737a45be1b Mon Sep 17 00:00:00 2001
From: Josia Pietsch
Date: Tue, 11 Jul 2023 23:38:47 +0200
Subject: [PATCH] some small changes

---
 inputs/lecture_02.tex |  2 +-
 inputs/lecture_03.tex |  3 ++-
 inputs/lecture_06.tex | 20 ++++++++++----------
 inputs/lecture_07.tex | 17 ++++++++---------
 inputs/lecture_09.tex |  1 +
 inputs/lecture_10.tex | 43 ++++++++++++++++++++++---------------------
 inputs/lecture_11.tex |  2 +-
 inputs/lecture_12.tex |  1 +
 inputs/lecture_13.tex |  2 +-
 inputs/lecture_20.tex |  2 +-
 10 files changed, 48 insertions(+), 45 deletions(-)

diff --git a/inputs/lecture_02.tex b/inputs/lecture_02.tex
index 5720809..c8fca18 100644
--- a/inputs/lecture_02.tex
+++ b/inputs/lecture_02.tex
@@ -87,7 +87,7 @@ to an infinite number of random variables.
 Let $\bP_n, n \in \N$ be probability measures on $(\R^n, \cB(\R^n))$
 which are \vocab{consistent},
 then there exists a unique probability measure $\bP^{\otimes}$
-    on $(\R^\infty, B(R^\infty))$ (where $B(R^{\infty}$ has to be defined),
+    on $(\R^\infty, \cB(\R^\infty))$ (where $\cB(\R^{\infty})$ has to be defined),
 such that
 \[
-    \forall n \in \N, B_1,\ldots, B_n \in B(\R):
+    \forall n \in \N, B_1,\ldots, B_n \in \cB(\R):
diff --git a/inputs/lecture_03.tex b/inputs/lecture_03.tex
index ffee178..b4436bd 100644
--- a/inputs/lecture_03.tex
+++ b/inputs/lecture_03.tex
@@ -43,7 +43,8 @@
 Now, for any $C \subseteq \R^n$ let $C^\ast \coloneqq C \times \R^{\infty}$.
 Note that $C \in \cB_n \implies C^\ast \in \cF_n$.
 Thus $\cF_n = \{C^\ast : C \in \cB_n\}$.
-Define $\lambda_n : \cF_n : \to [0,1]$ by $\lambda_n(C^\ast) \coloneqq (\mu_1 \otimes \ldots \otimes \mu_n)(C)$.
-It is easy to see that $\lambda_{n+1} \defon{\cF_n} = \lambda_n$ (\vocab{consistency}).
+Define $\lambda_n : \cF_n \to [0,1]$ by $\lambda_n(C^\ast) \coloneqq (\mu_1 \otimes \ldots \otimes \mu_n)(C)$.
+It is easy to see that $\lambda_{n+1} \defon{\cF_n} = \lambda_n$,
+i.e.~the $\lambda_n$ form a consistent family.

 Recall the following theorem from measure theory:

diff --git a/inputs/lecture_06.tex b/inputs/lecture_06.tex
index cb92fec..4e68c1f 100644
--- a/inputs/lecture_06.tex
+++ b/inputs/lecture_06.tex
@@ -56,7 +56,7 @@ In order to prove \autoref{thm2}, we need the following:
 \]
 \end{theorem}
 \begin{proof}
-    Let
+    Let
     \begin{IEEEeqnarray*}{rCl}
         A_1 &\coloneqq& \{\omega : |X_1(\omega)| > \epsilon\},\\
         A_2 &\coloneqq & \{\omega: |X_1(\omega)| \le \epsilon,
@@ -73,16 +73,16 @@ In order to prove \autoref{thm2}, we need the following:
     We have
     \begin{IEEEeqnarray*}{rCl}
         &&\int_{A_i} (\underbrace{X_1 + \ldots + X_i}_C
-        + \underbrace{X_{i+1} + \ldots + X_n}_D)^2 d \bP\\
-        &=& \int_{A_i} C^2 d\bP
-        + \underbrace{\int_{A_i} D^2 d \bP}_{\ge 0}
-        + 2 \int_{A_i} CD d\bP\\
-        &\ge& \int_{A_i} \underbrace{C^2}_{\ge \epsilon^2} d \bP
-        + 2 \int \underbrace{\One_{A_i} (X_1 + \ldots + X_i)}_E \underbrace{(X_{i+1} + \ldots + X_n)}_D d \bP\\
-        &\ge& \int_{A_i} \epsilon^2 d\bP,
+        + \underbrace{X_{i+1} + \ldots + X_n}_D)^2 \dif\bP\\
+        &=& \int_{A_i} C^2 \dif\bP
+        + \underbrace{\int_{A_i} D^2 \dif\bP}_{\ge 0}
+        + 2 \int_{A_i} CD \dif\bP\\
+        &\ge& \int_{A_i} \underbrace{C^2}_{\ge \epsilon^2} \dif\bP
+        + 2 \int \underbrace{\One_{A_i} (X_1 + \ldots + X_i)}_E \underbrace{(X_{i+1} + \ldots + X_n)}_D \dif\bP\\
+        &\ge& \int_{A_i} \epsilon^2 \dif\bP,
     \end{IEEEeqnarray*}
     since by the independence of $E$ and $D$,
-    and $\bE(X_{i+1}) = \ldots = \bE(X_n) = 0$ we have $\int D E d\bP = 0$.
+    and $\bE(X_{i+1}) = \ldots = \bE(X_n) = 0$ we have $\int D E \dif\bP = 0$.

     Hence
     \[
@@ -122,7 +122,7 @@ In order to prove \autoref{thm2}, we need the following:
     is a Cauchy sequence iff $a(\omega) = 0$.
     We want to show that $\bP[a(\omega) > 0] = 0$.
-    For this, it suffices to show that $\bP(a(\omega) > \epsilon] = 0$
+    For this, it suffices to show that $\bP[a(\omega) > \epsilon] = 0$
     for all $\epsilon > 0$.
     For a fixed $\epsilon > 0$, we obtain:
     \begin{IEEEeqnarray*}{rCl}
diff --git a/inputs/lecture_07.tex b/inputs/lecture_07.tex
index 8a63fd6..d18cde4 100644
--- a/inputs/lecture_07.tex
+++ b/inputs/lecture_07.tex
@@ -13,8 +13,8 @@
 of numbers converge:
 \begin{itemize}
     \item $\sum_{n \ge 1} \bP(|X_n| > C)$,
-    \item $\sum_{n \ge 1} \underbrace{\int_{|X_n| \le C} X_n d\bP}_{\text{\vocab{truncated mean}}}$,
-    \item $\sum_{n \ge 1} \underbrace{\int_{|X_n| \le C} X_n^2 d\bP - \left( \int_{|X_n| \le C} X_n d\bP \right)^2}_{\text{\vocab{truncated variance} }}$.
+    \item $\sum_{n \ge 1} \underbrace{\int_{|X_n| \le C} X_n \dif\bP}_{\text{\vocab{truncated mean}}}$,
+    \item $\sum_{n \ge 1} \underbrace{\int_{|X_n| \le C} X_n^2 \dif\bP - \left( \int_{|X_n| \le C} X_n \dif\bP \right)^2}_{\text{\vocab{truncated variance}}}$.
 \end{itemize}
 Then $\sum_{n \ge 1} X_n$ converges almost surely.
 \item Suppose $\sum_{n \ge 1} X_n$ converges almost surely.
@@ -37,8 +37,8 @@ For the proof we'll need a slight generalization of \autoref{thm2}:
 Since the $X_n$ are independent, the $Y_n$ are independent as well.
 Furthermore, the $Y_n$ are uniformly bounded.
 By our assumption, the series
-    $\sum_{n \ge 1} \int_{|X_n| \le C} X_n d\bP = \sum_{n \ge 1} \bE[Y_n]$
-    and $\sum_{n \ge 1} \int_{|X_n| \le C} X_n^2 d\bP - \left( \int_{|X_n| \le C} X_n d\bP \right)^2 = \sum_{n \ge 1} \Var(Y_n)$
-converges.
-By \autoref{thm4} it follows that $\sum_{n \ge 1} Y_n < \infty$ almost surely.
+    $\sum_{n \ge 1} \int_{|X_n| \le C} X_n \dif\bP = \sum_{n \ge 1} \bE[Y_n]$
+    and $\sum_{n \ge 1} \int_{|X_n| \le C} X_n^2 \dif\bP - \left( \int_{|X_n| \le C} X_n \dif\bP \right)^2 = \sum_{n \ge 1} \Var(Y_n)$
+converge.
+By \autoref{thm4} it follows that $\sum_{n \ge 1} Y_n$ converges almost surely.
@@ -76,10 +76,10 @@

     We have
     \begin{IEEEeqnarray*}{rCl}
-        \bE(Y_n) &=& \int_{|X_n| \le C} X_n d \bP + C \bP(|X_n| \ge C),\\
-        \bE(Z_n) &=& \int_{|X_n| \le C} X_n d \bP - C \bP(|X_n| \ge C).
+        \bE(Y_n) &=& \int_{|X_n| \le C} X_n \dif\bP + C \bP(|X_n| \ge C),\\
+        \bE(Z_n) &=& \int_{|X_n| \le C} X_n \dif\bP - C \bP(|X_n| \ge C).
     \end{IEEEeqnarray*}
-    Since $\bE(Y_n) + \bE(Z_n) = 2 \int_{|X_n| \le C} X_n d\bP$
+    Since $\bE(Y_n) + \bE(Z_n) = 2 \int_{|X_n| \le C} X_n \dif\bP$
     the second series converges, and since $\bE(Y_n) - \bE(Z_n)$
     converges, the first series converges.
@@ -160,8 +160,7 @@ More formally:
     and uniform boundedness was not used.
     The idea of `` $\implies$ '' will lead to coupling. % TODO ?
-
-    % TODO Proof of thm5 in the notes
+A proof of \autoref{thm5} can be found in the notes.\notes
 \begin{example}[Application of \autoref{thm5}]
     The series $\sum_{n} \frac{1}{n^{\frac{1}{2} + \epsilon}}$ does not converge for $\epsilon < \frac{1}{2}$.
diff --git a/inputs/lecture_09.tex b/inputs/lecture_09.tex
index 94f1afa..186f033 100644
--- a/inputs/lecture_09.tex
+++ b/inputs/lecture_09.tex
@@ -106,6 +106,7 @@ We have
     \item We have $\phi(0) = 1$.
     \item $|\phi(t)| \le \int_{\R} |e^{\i t x} | \bP(dx) = 1$.
 \end{itemize}
+\todo{Properties of characteristic functions}

 \begin{remark}
     Suppose $(\Omega, \cF, \bP)$ is an arbitrary probability space and
diff --git a/inputs/lecture_10.tex b/inputs/lecture_10.tex
index c5fa42e..2153570 100644
--- a/inputs/lecture_10.tex
+++ b/inputs/lecture_10.tex
@@ -9,7 +9,7 @@
 We consider $(\R, \cB(\R))$.
 By $M_1 (\R)$ we denote the set of all probability measures
 on $\left( \R, \cB(\R) \right)$.
 \end{notation}
-For all $\bP \in M_1(\R)$ we define $\phi_{\bP}(t) = \int_{\R} e^{\i t x}d\bP(x)$.
+For all $\bP \in M_1(\R)$ we define $\phi_{\bP}(t) = \int_{\R} e^{\i t x}\dif\bP(x)$.
 If $X: (\Omega, \cF) \to (\R, \cB(\R))$ is a random variable,
 we write $\phi_X(t) \coloneqq \bE[e^{\i t X}] = \phi_{\mu}(t)$,
 where $\mu = \bP X^{-1}$.
@@ -27,15 +27,15 @@ where $\mu = \bP X^{-1}$.

     We have
     \begin{IEEEeqnarray*}{rCl}
-        &&\lim_{T \to \infty} \frac{1}{2 \pi} \int_{-T}^T \int_{\R} \frac{e^{-\i t b}- e^{-\i t a}}{-\i t} e^{\i t x} dt d \bP(x)\\
-        &\overset{\text{Fubini for $L^1$}}{=}& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \int_{-T}^T \frac{e^{-\i t b}- e^{-\i t a}}{-\i t} e^{\i t x} dt d \bP(x)\\
-        &=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \int_{-T}^T \frac{e^{\i t (b-x)}- e^{\i t (x-a)}}{-\i t} dt d \bP(x)\\
-        &=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \underbrace{\int_{-T}^T \left[ \frac{\cos(t (x-b)) - \cos(t(x-a))}{-\i t}\right] dt d \bP(x)}_{=0 \text{, as the function is odd}}
+        &&\lim_{T \to \infty} \frac{1}{2 \pi} \int_{-T}^T \int_{\R} \frac{e^{-\i t b}- e^{-\i t a}}{-\i t} e^{\i t x} dt \dif\bP(x)\\
+        &\overset{\text{Fubini for $L^1$}}{=}& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \int_{-T}^T \frac{e^{-\i t b}- e^{-\i t a}}{-\i t} e^{\i t x} dt \dif\bP(x)\\
+        &=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \int_{-T}^T \frac{e^{\i t (x-b)}- e^{\i t (x-a)}}{-\i t} dt \dif\bP(x)\\
+        &=& \lim_{T \to \infty} \frac{1}{2 \pi} \int_{\R} \underbrace{\int_{-T}^T \left[ \frac{\cos(t (x-b)) - \cos(t(x-a))}{-\i t}\right] dt}_{=0 \text{, as the function is odd}} \dif\bP(x)
 \\&&
-        + \lim_{T \to \infty} \frac{1}{2\pi} \int_{\R}\int_{-T}^T \frac{\sin(t ( x - b)) - \sin(t(x-a))}{-t} dt d\bP(x)\\
-        &=& \lim_{T \to \infty} \frac{1}{\pi} \int_\R \int_{0}^T \frac{\sin(t(x-a)) - \sin(t(x-b))}{t} dt d\bP(x)\\
+        + \lim_{T \to \infty} \frac{1}{2\pi} \int_{\R}\int_{-T}^T \frac{\sin(t ( x - b)) - \sin(t(x-a))}{-t} dt \dif\bP(x)\\
+        &=& \lim_{T \to \infty} \frac{1}{\pi} \int_\R \int_{0}^T \frac{\sin(t(x-a)) - \sin(t(x-b))}{t} dt \dif\bP(x)\\
         &\overset{\substack{\text{\autoref{fact:intsinxx},}\\\text{dominated convergence}}}{=}& \frac{1}{\pi} \int -\frac{\pi}{2} \One_{x < a} + \frac{\pi}{2} \One_{x > a }
-        - (- \frac{\pi}{2} \One_{x < b} + \frac{\pi}{2} \One_{x > b}) d\bP(x)\\
+        - (- \frac{\pi}{2} \One_{x < b} + \frac{\pi}{2} \One_{x > b}) \dif\bP(x)\\
         &=& \frac{1}{2} \bP(\{a\} ) + \frac{1}{2} \bP(\{b\}) + \bP((a,b))\\
-        &=& \frac{F(b) + F(b-)}{2} - \frac{F(a) - F(a-)}{2}
+        &=& \frac{F(b) + F(b-)}{2} - \frac{F(a) + F(a-)}{2}
     \end{IEEEeqnarray*}
@@ -129,10 +129,10 @@ However, Fourier analysis is not only useful for continuous probability density
 \begin{refproof}{bochnersformula}
     We have
     \begin{IEEEeqnarray*}{rCl}
-        RHS &=& \lim_{T \to \infty} \frac{1}{2 T} \int_{-T}^T e^{-\i t x} \int_{\R} e^{\i t y} d \bP(y) \\
+        RHS &=& \lim_{T \to \infty} \frac{1}{2 T} \int_{-T}^T e^{-\i t x} \int_{\R} e^{\i t y} \dif\bP(y) \, dt \\
         &\overset{\text{Fubini}}{=}& \lim_{T \to \infty} \frac{1}{2 T} \int_\R \bP(dy) \int_{-T}^T \underbrace{e^{-\i t (y - x)}}_{\cos(t ( y - x)) + \i \sin(t (y-x))} dt\\
-        &=& \lim_{T \to \infty} \frac{1}{2T} \int_{\R} d\bP(y) \int_{-T}^T \cos(t(y - x)) dt\\
-        &=& \lim_{T \to \infty} \frac{1}{2 T }\int_{\R} \frac{2 \sin(T (y-x)}{T (y-x)} d \bP(y)\\
+        &=& \lim_{T \to \infty} \frac{1}{2T} \int_{\R} \dif\bP(y) \int_{-T}^T \cos(t(y - x)) dt\\
+        &=& \lim_{T \to \infty} \int_{\R} \frac{\sin(T (y-x))}{T (y-x)} \dif\bP(y)\\
     \end{IEEEeqnarray*}
     Furthermore
     \[
         \lim_{T \to \infty} \frac{\sin(T (y-x))}{T (y-x)} = \One_{y = x},
     \]
     where the quotient is read as $1$ for $y = x$.
     Hence
     \begin{IEEEeqnarray*}{rCl}
-        \lim_{T \to \infty} \frac{1}{2 T }\int_{\R} \frac{2 \sin(T (y-x)}{T (y-x)} d \bP(y) &=& \bP\left( \{x\}\right)
+        \lim_{T \to \infty} \int_{\R} \frac{\sin(T (y-x))}{T (y-x)} \dif\bP(y) &=& \bP\left( \{x\}\right)
     \end{IEEEeqnarray*}
-    % TODO by dominated convergence?
+    by dominated convergence, since the integrand is bounded by $1$.
 \end{refproof}
@@ -168,9 +168,9 @@ However, Fourier analysis is not only useful for continuous probability density

     For part (b) we have:
     \begin{IEEEeqnarray*}{rCl}
-        \sum_{j,k} c_j \overline{c_k} \phi(t_j - t_k) &=& \sum_{j,k} c_j \overline{c_k} \int_\R e^{\i (t_j - t_k) x} d \bP(x)\\
-        &=& \int_{\R} \sum_{j,k} c_j \overline{c_k} e^{\i t_j x} \overline{e^{\i t_k x}} d\bP(x)\\
-        &=& \int_{\R}\sum_{j,k} c_j e^{\i t_j x} \overline{c_k e^{\i t_k x}} d\bP(x)\\
-        &=& \int_{\R} \left| \sum_{l} c_l e^{\i t_l x}\right|^2 \ge 0
+        \sum_{j,k} c_j \overline{c_k} \phi(t_j - t_k) &=& \sum_{j,k} c_j \overline{c_k} \int_\R e^{\i (t_j - t_k) x} \dif\bP(x)\\
+        &=& \int_{\R} \sum_{j,k} c_j \overline{c_k} e^{\i t_j x} \overline{e^{\i t_k x}} \dif\bP(x)\\
+        &=& \int_{\R}\sum_{j,k} c_j e^{\i t_j x} \overline{c_k e^{\i t_k x}} \dif\bP(x)\\
+        &=& \int_{\R} \left| \sum_{l} c_l e^{\i t_l x}\right|^2 \dif\bP(x) \ge 0
     \end{IEEEeqnarray*}
 \end{refproof}
@@ -187,7 +187,7 @@ Unfortunately, we won't prove \autoref{bochnersthm} in this lecture.
 \label{def:weakconvergence}
 We say that $\bP_n \subseteq M_1(\R)$ \vocab[Convergence!weak]{converges weakly} towards $\bP \in M_1(\R)$ (notation: $\bP_n \implies \bP$), iff
 \[
-    \forall f \in C_b(\R)~ \int f d\bP_n \to \int f d\bP.
+    \forall f \in C_b(\R)~ \int f \dif\bP_n \to \int f \dif\bP.
 \]
 Where
 \[
     C_b(\R) \coloneqq \{f : \R \to \R \mid f \text{ continuous and bounded}\}.
 \]
 In analysis, this is also known as $\text{weak}^\ast$ convergence.
 \end{definition}
 \begin{remark}
-    This notion of convergence makes $M_1(\R)$ a separable metric space. We can construc a metric on $M_1(\R)$ that turns $M_1(\R)$ into a complete
+    This notion of convergence makes $M_1(\R)$ a separable metric space.
+    We can construct a metric on $M_1(\R)$ that turns $M_1(\R)$ into a complete
     and separable metric space:
     Consider the sets
     \[
-        \{\bP \in M_1(\R): \forall i=1,\ldots,n ~ \int f d \bP - \int f_i d\bP < \epsilon \}
+        \{\bQ \in M_1(\R): \forall i=1,\ldots,n ~ \left| \int f_i \dif\bQ - \int f_i \dif\bP \right| < \epsilon \}
     \]
-    for any $f,f_1,\ldots, f_n \in C_b(\R)$.
+    for a fixed $\bP \in M_1(\R)$ and any $f_1,\ldots, f_n \in C_b(\R)$, $\epsilon > 0$.
     These sets form a basis for the topology on $M_1(\R)$.
 \end{remark}
 \begin{example}
     \begin{itemize}
         \item Let $\bP_n = \delta_{\frac{1}{n}}$.
-        Then $\int f d \bP_n = f(\frac{1}{n}) \to f(0) = \int f d \delta_0$
+        Then $\int f \dif\bP_n = f(\frac{1}{n}) \to f(0) = \int f \dif\delta_0$
         for any continuous, bounded function $f$.
         Hence $\bP_n \to \delta_0$.
         \item $\bP_n \coloneqq \delta_n$ does not converge weakly,
         as for example
         \[
-            \int \cos(\pi x) d\bP_n(x)
+            \int \cos(\pi x) \dif\bP_n(x)
         \]
         does not converge.
         \item Let $\bP_n \coloneqq \frac{1}{n} \delta_n + (1 - \frac{1}{n}) \delta_0$.
         Let $f \in C_b(\R)$ arbitrary.
         Then
         \[
-            \int f d\bP_n = \frac{1}{n}(n) + (1 - \frac{1}{n}) f(0) \to f(0)
+            \int f \dif\bP_n = \frac{1}{n} f(n) + (1 - \frac{1}{n}) f(0) \to f(0)
         \]
         since $f$ is bounded. Hence $\bP_n \implies \delta_0$.
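+% The following remark is an editorial addition, not part of the original
+% lecture notes; it records a standard observation that only uses the last
+% example and \autoref{def:weakconvergence}.
+\begin{remark}
+    The last example also shows that weak convergence does not imply
+    convergence of expectations: although $\bP_n \implies \delta_0$,
+    \[
+        \int x \dif\bP_n(x) = \frac{1}{n} \cdot n = 1 \not\to 0 = \int x \dif\delta_0(x).
+    \]
+    This does not contradict \autoref{def:weakconvergence},
+    since $x \mapsto x$ is unbounded, i.e.~not an element of $C_b(\R)$.
+\end{remark}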
diff --git a/inputs/lecture_11.tex b/inputs/lecture_11.tex
index 4be3db3..5277228 100644
--- a/inputs/lecture_11.tex
+++ b/inputs/lecture_11.tex
@@ -124,7 +124,7 @@ If $S_n \sim \Bin(n,p)$ and $[a,b] \subseteq \R$, we have
     &\approx& \Phi(0.01 \sqrt{\frac{n}{p(1-p)}}) - \Phi(-0.01 \sqrt{\frac{n}{p(1-p)}})\\
     &=& 2\Phi(0.01 \sqrt{\frac{n}{p(1-p)}}) - 1\\
     \end{IEEEeqnarray*}
-    Hence, we want $\Phi(0.01 \sqrt{\frac{n}{p(1-p}}) \approx \frac{1.95}{2}$,
-    i.e.~$n = (1.96)^2 100^2 p\cdot (1-p)$
+    Hence, we want $\Phi(0.01 \sqrt{\frac{n}{p(1-p)}}) \approx \frac{1.95}{2}$,
+    i.e.~$n = (1.96)^2 100^2 p\cdot (1-p)$.
     We have $p\cdot (1-p) \le \frac{1}{4}$,
-    thus $n \approx (1.96)^2 \cdot 100^2 \cdot \frac{1}{4} = 9600$ suffices.
+    thus $n = (1.96)^2 \cdot 100^2 \cdot \frac{1}{4} = 9604$ suffices.
diff --git a/inputs/lecture_12.tex b/inputs/lecture_12.tex
index 1eaabfc..c866f2f 100644
--- a/inputs/lecture_12.tex
+++ b/inputs/lecture_12.tex
@@ -223,3 +223,4 @@ Now, we can finally prove the CLT:
     where $\langle t, X\rangle \coloneqq \sum_{i = 1}^d t_i X_i$.
 \end{remark}
-Exercise: Find out, which properties also hold for $d > 1$.
+Exercise: Find out which properties also hold for $d > 1$.
+\todo{TODO}
diff --git a/inputs/lecture_13.tex b/inputs/lecture_13.tex
index e147b69..aea6d1a 100644
--- a/inputs/lecture_13.tex
+++ b/inputs/lecture_13.tex
@@ -16,7 +16,7 @@ if $X_1, X_2,\ldots$ are i.i.d.~with $ \mu = \bE[X_1]$,
     Assume $X_1, X_2, \ldots,$ are independent (but not necessarily identically distributed)
     with $\mu_i = \bE[X_i] < \infty$ and $\sigma_i^2 = \Var(X_i) < \infty$.
     Let $S_n = \sqrt{\sum_{i=1}^{n} \sigma_i^2}$ and assume that
-    \[\lim_{n \to \infty} \frac{1}{S_n^2} \bE\left[(X_i - \mu_i)^2 \One_{|X_i - \mu_i| > \epsilon \S_n}\right] = 0\]
+    \[\lim_{n \to \infty} \frac{1}{S_n^2} \sum_{i=1}^{n} \bE\left[(X_i - \mu_i)^2 \One_{|X_i - \mu_i| > \epsilon S_n}\right] = 0\]
     for all $\epsilon > 0$
     (\vocab{Lindeberg condition}\footnote{``The truncated variance is negligible compared to the variance.''}).
diff --git a/inputs/lecture_20.tex b/inputs/lecture_20.tex
index c23d6a7..db09cd4 100644
--- a/inputs/lecture_20.tex
+++ b/inputs/lecture_20.tex
@@ -87,7 +87,7 @@ we need the following theorem, which we won't prove here:
     via
     \begin{IEEEeqnarray*}{rCl}
         L^p &\longrightarrow & (L^q)^\ast \\
-        f &\longmapsto & (g \mapsto \int g f \dif d\bP)
+        f &\longmapsto & (g \mapsto \int g f \dif\bP)
     \end{IEEEeqnarray*}
     We also have $(L^1)^\ast \cong L^\infty$,
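--
Postscript (not part of the patch): the sample-size arithmetic in the
inputs/lecture_11.tex hunk above can be checked directly; the worked chain
below assumes only the standard normal quantile $\Phi^{-1}(0.975) \approx 1.96$:

\[
    2\Phi\left(0.01 \sqrt{\tfrac{n}{p(1-p)}}\right) - 1 \ge 0.95
    \iff 0.01 \sqrt{\tfrac{n}{p(1-p)}} \ge 1.96
    \iff n \ge (1.96)^2 \cdot 100^2 \cdot p(1-p),
\]

and since $p(1-p) \le \tfrac{1}{4}$, any
$n \ge (1.96)^2 \cdot 100^2 \cdot \tfrac{1}{4} = 9604$ suffices.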