\lecture{6}{}{Proof of SLLN}
\begin{refproof}{lln}
    We want to deduce the SLLN (\yaref{lln}) from \yaref{thm2}.

    W.l.o.g.~let us assume that $\bE[X_i] = 0$
    (otherwise define $X'_i \coloneqq X_i - \bE[X_i]$).
    We will show that $\frac{S_n}{n} \xrightarrow{a.s.} 0$.

    Define $Y_i \coloneqq \frac{X_i}{i}$.
    Then the $Y_i$ are independent and we have
    $\bE[Y_i] = 0$ and $\Var(Y_i) = \frac{\sigma^2}{i^2}$.
    Thus $\sum_{i=1}^\infty \Var(Y_i) < \infty$.
    From \yaref{thm2} we obtain that $\sum_{i=1}^\infty Y_i$ converges a.s.
    \begin{claim}
        Let $(a_n)$ be a sequence in $\R$ such that
        $\sum_{n=1}^{\infty} \frac{a_n}{n}$ converges.
        Then $\frac{a_1 + \ldots + a_n}{n} \to 0$.
    \end{claim}
    \begin{subproof}
        Define $S_0 \coloneqq 0$ and
        $S_m \coloneqq \sum_{n=1}^{m} \frac{a_n}{n}$.
        By assumption, there exists $S \in \R$
        such that $S_m \xrightarrow{m \to \infty} S$.
        Note that $j \cdot (S_{j} - S_{j-1}) = a_j$.
        Then
        \begin{IEEEeqnarray*}{rCl}
            a_1 + \ldots + a_n &=& (S_1 - S_0) + 2(S_2 - S_1) + \ldots + n(S_n - S_{n-1})\\
            &=& n S_n - (S_1 + S_2 + \ldots + S_{n-1}).
        \end{IEEEeqnarray*}
        Thus
        \begin{IEEEeqnarray*}{rCl}
            \frac{a_1 + \ldots + a_n}{n}
            &=& S_n - \frac{S_1 + \ldots + S_{n-1}}{n}\\
            &=& \underbrace{S_n}_{\to S} - \underbrace{\left( \frac{n-1}{n} \right)}_{\mathclap{\to 1}} \cdot \underbrace{\frac{S_1 + \ldots + S_{n-1}}{n-1}}_{\to S}\\
            &\to & 0,
        \end{IEEEeqnarray*}
        where we have used
        \begin{fact}
            If $(S_n)$ converges, then its Ces\`aro means converge to the same limit, i.e.
            \[
                \lim_{n \to \infty} \frac{1}{n}\sum_{i=1}^{n} S_i = \lim_{n \to \infty} S_n.
            \]
        \end{fact}
    \end{subproof}
    The SLLN now follows by applying the claim to $a_n \coloneqq X_n(\omega)$
    for every $\omega$ in the a.s.~event on which
    $\sum_{n=1}^{\infty} \frac{X_n}{n}$ converges.
\end{refproof}

In order to prove \yaref{thm2}, we need the following:
\begin{theorem}[Kolmogorov's inequality]
    \yalabel{Kolmogorov's Inequality}{Kolmogorov}{thm:kolmogorovineq}
    If $X_1,\ldots, X_n$ are independent with $\bE[X_i] = 0$
    and $\Var(X_i) = \sigma_i^2$,
    then for every $\epsilon > 0$
    \[
        \bP\left[\max_{1 \le i \le n} \left| \sum_{j=1}^{i} X_j \right| > \epsilon \right]
        \le \frac{1}{\epsilon^2} \sum_{i=1}^{n} \sigma_i^2.
    \]
\end{theorem}
\begin{proof}
    Let
    \begin{IEEEeqnarray*}{rCl}
        A_1 &\coloneqq& \{\omega : |X_1(\omega)| > \epsilon\},\\
        A_2 &\coloneqq & \{\omega: |X_1(\omega)| \le \epsilon, |X_1(\omega) + X_2(\omega)| > \epsilon \},\\
        \ldots\\
        A_i &\coloneqq& \{\omega: |X_1(\omega)| \le \epsilon, \ldots,
        |X_1(\omega) + \ldots + X_{i-1}(\omega)| \le \epsilon,\\
        && ~ ~|X_1(\omega) + \ldots + X_i(\omega)| > \epsilon\}.
    \end{IEEEeqnarray*}
    It is clear that the $A_i$ are disjoint, and
    \[
        \bigcup_{1 \le i \le n} A_i
        = \left\{ \max_{1 \le i \le n} \left| \sum_{j=1}^{i} X_j \right| > \epsilon \right\}.
    \]
    We have
    \begin{IEEEeqnarray*}{rCl}
        &&\int_{A_i} (\underbrace{X_1 + \ldots + X_i}_C + \underbrace{X_{i+1} + \ldots + X_n}_D)^2 \dif\bP\\
        &=& \int_{A_i} C^2 \dif\bP + \underbrace{\int_{A_i} D^2 \dif\bP}_{\ge 0} + 2 \int_{A_i} CD \dif\bP\\
        &\ge& \int_{A_i} \underbrace{C^2}_{\ge \epsilon^2} \dif\bP + 2 \int \underbrace{\One_{A_i} (X_1 + \ldots + X_i)}_E \underbrace{(X_{i+1} + \ldots + X_n)}_D \dif\bP\\
        &\ge& \int_{A_i} \epsilon^2 \dif\bP,
    \end{IEEEeqnarray*}
    since $E$ is a function of $X_1, \ldots, X_i$ only,
    hence $E$ and $D$ are independent,
    and $\bE[X_{i+1}] = \ldots = \bE[X_n] = 0$,
    so $\int D E \dif\bP = \bE[E] \cdot \bE[D] = 0$.
    Hence
    \[
        \bP(A_i) \le \frac{1}{\epsilon^2} \int_{A_i} (X_1 + \ldots + X_n)^2 \dif \bP.
    \]
    Since the $A_i$ are disjoint, we obtain
    \begin{IEEEeqnarray*}{rCl}
        \bP\left( \bigcup_{1 \le i \le n} A_i \right)
        &\le & \frac{1}{\epsilon^2} \int_{\bigcup_{1 \le i \le n} A_i} (X_1 + \ldots + X_n)^2 \dif \bP\\
        &\le & \frac{1}{\epsilon^2} \int_{\Omega} (X_1 + \ldots + X_n)^2 \dif \bP\\
        &\overset{\text{independence}}{=}& \frac{1}{\epsilon^2}(\bE[X_1^2] + \ldots + \bE[X_n^2])\\
        &\overset{\bE[X_i] = 0}{=}& \frac{1}{\epsilon^2} \left( \Var(X_1) + \ldots + \Var(X_n)\right).
    \end{IEEEeqnarray*}
\end{proof}
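As a quick sanity check: for $n = 1$, \yaref{thm:kolmogorovineq} is precisely
Chebyshev's inequality, and for general $n$ it gives the same bound that
Chebyshev's inequality, applied to $S_n = X_1 + \ldots + X_n$,
gives for the last partial sum alone:
\[
    \bP[|S_n| > \epsilon]
    \le \frac{\Var(S_n)}{\epsilon^2}
    = \frac{1}{\epsilon^2} \sum_{i=1}^{n} \sigma_i^2.
\]
The gain is that the maximum over all partial sums is controlled at no extra cost.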
\begin{refproof}{thm2}
    Let $S_n \coloneqq X_1 + \ldots + X_n$.
    We will show that $\{S_n(\omega)\}_{n \in \N}$ is a Cauchy sequence
    for almost every $\omega$.
    Let
    \[
        a_m(\omega) \coloneqq \sup_{k \in \N} \{ | S_{m+k}(\omega) - S_m(\omega)|\}
    \]
    and
    \[
        a(\omega) \coloneqq \inf_{m \in \N} a_m(\omega).
    \]
    Then $\{S_n(\omega)\}_{n \in \N}$ is a Cauchy sequence iff $a(\omega) = 0$.
    We want to show that $\bP[a > 0] = 0$.
    For this, it suffices to show that $\bP[a > \epsilon] = 0$
    for all $\epsilon > 0$.

    For a fixed $\epsilon > 0$, we obtain
    \begin{IEEEeqnarray*}{rCl}
        \bP[a_m > \epsilon] &=& \bP[ \sup_{k \in \N} | S_{m+k} - S_m| > \epsilon]\\
        &=& \lim_{l \to \infty} \bP[%
        \underbrace{\sup_{k \le l} |S_{m+k} - S_m| > \epsilon}_{%
        \text{\reflectbox{$\coloneqq$}} B_l \uparrow%
        B \coloneqq \{\sup_{k \in \N} |S_{m+k} - S_m| > \epsilon\}}%
        ].
    \end{IEEEeqnarray*}
    Now, since
    \[
        \sup_{k \le l} |S_{m+k} - S_m|
        = \max \{|X_{m+1}|, |X_{m+1} + X_{m+2}|, \ldots, |X_{m+1} + \ldots + X_{m+l}|\},
    \]
    applying \yaref{thm:kolmogorovineq} to $X_{m+1}, \ldots, X_{m+l}$ yields
    \[
        \bP[B_l]
        \le \frac{1}{\epsilon^2} \sum_{i=m+1}^{m+l} \Var(X_i)
        \le \frac{1}{\epsilon^2} \sum_{i=m+1}^{\infty} \Var(X_i).
    \]
    Hence
    \[
        \bP[a_m > \epsilon]
        = \lim_{l \to \infty} \bP[B_l]
        \le \frac{1}{\epsilon^2} \sum_{i=m+1}^{\infty} \Var(X_i)
        \xrightarrow{m \to \infty} 0,
    \]
    since by our assumption $\sum_{i=1}^{\infty} \Var(X_i) < \infty$.
    As $a \le a_m$ for every $m$,
    it follows that $\bP[a > \epsilon] = 0$, as claimed.
\end{refproof}

\subsubsection{Application: Renewal Theorem}

\begin{theorem}[Renewal theorem]
    Let $X_1,X_2,\ldots$ be i.i.d.~random variables
    with $X_i \ge 0$ and $\bE[X_i] = m > 0$.
    Let $S_n \coloneqq \sum_{i=1}^n X_i$
    and for all $t > 0$ let
    \[
        N_t \coloneqq \sup \{n : S_n \le t\}.
    \]
    Then $\frac{N_t}{t} \xrightarrow{a.s.} \frac{1}{m}$ as $t \to \infty$.
\end{theorem}
The $X_i$ can be thought of as waiting times:
$S_i$ is how long you have to wait for $i$ events to occur,
and $N_t$ counts the events that occur up to time $t$.
\begin{proof}
    By the SLLN, $\frac{S_n}{n} \xrightarrow{a.s.} m$ as $n \to \infty$.
    Note that
    \begin{equation}
        N_t \uparrow \infty \text{ a.s.~as } t \to \infty,
        \label{eqn:renewalnt}
    \end{equation}
    since $\{N_t \ge n\} = \{X_1 + \ldots + X_n \le t\}$
    and $S_n < \infty$ a.s.
    \begin{claim}
        $\bP[\frac{S_n}{n} \xrightarrow{n \to \infty} m \land N_t \xrightarrow{t \to \infty} \infty] = 1$.
    \end{claim}
    \begin{subproof}
        Let $A \coloneqq \{\omega: \frac{S_n(\omega)}{n} \xrightarrow{n \to \infty} m\}$
        and $B \coloneqq \{\omega : N_t(\omega) \xrightarrow{t \to \infty} \infty\}$.
        By the SLLN we have $\bP(A^C) = 0$,
        and by \eqref{eqn:renewalnt} it holds that $\bP(B^C) = 0$.
        Hence $\bP(A \cap B) \ge 1 - \bP(A^C) - \bP(B^C) = 1$.
    \end{subproof}
    On the a.s.~event from the claim we have $N_t \to \infty$,
    so the claim implies
    $\bP\left[ \frac{S_{N_t}}{N_t} \xrightarrow{t \to \infty} m
    \land \frac{S_{N_t + 1}}{N_t + 1} \xrightarrow{t \to \infty} m \right] = 1$.
    By definition, we have $S_{N_t} \le t \le S_{N_t + 1}$.
    Thus
    \[
        \frac{S_{N_t}}{N_t} \le \frac{t}{N_t} \le \frac{S_{N_t + 1}}{N_t}
        = \frac{S_{N_t + 1}}{N_t + 1} \cdot \frac{N_t + 1}{N_t}.
    \]
    Since both bounds converge to $m$ a.s.~(using $\frac{N_t + 1}{N_t} \to 1$),
    we get $\frac{t}{N_t} \xrightarrow{a.s.} m$,
    i.e.~$\frac{N_t}{t} \xrightarrow{a.s.} \frac{1}{m}$.
\end{proof}
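As a concrete example, suppose the waiting times are exponential,
i.e.~$X_i \sim \operatorname{Exp}(\lambda)$ for some rate $\lambda > 0$.
Then $m = \bE[X_i] = \frac{1}{\lambda}$,
the process $(N_t)_{t \ge 0}$ is a Poisson process with rate $\lambda$,
and the renewal theorem gives
\[
    \frac{N_t}{t} \xrightarrow{a.s.} \lambda,
\]
i.e.~in the long run, events occur at rate $\lambda$,
matching the rate of the exponential waiting times.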