\lecture{9}{}{Percolation, Introduction to characteristic functions}
\subsubsection{Application: Percolation}

We will now discuss another application of \yaref{kolmogorov01}, percolation.

\begin{definition}[\vocab{Percolation}]
    Consider the graph with nodes $\Z^d$, $d \ge 2$,
    where each edge of the lattice is added independently with probability $p$.
    The added edges are called \vocab[Percolation!Edge!open]{open};
    all other edges are called \vocab[Percolation!Edge!closed]{closed}.

    More formally, we consider
    \begin{itemize}
        \item $\Omega = \{0,1\}^{\bE_d}$, where $\bE_d$ are all edges in $\Z^d$,
        \item $\cF \coloneqq \text{product $\sigma$-algebra}$,
        \item $\bP \coloneqq \left(p \underbrace{\delta_{\{1\}}}_{\text{edge is open}} + (1-p) \underbrace{\delta_{\{0\}}}_{\text{edge is closed}}\right)^{\otimes \bE_d}$.
    \end{itemize}
\end{definition}
\begin{question}
    What is the probability that there exists an infinite path starting at the origin
    (one that never revisits a point)?
\end{question}
\begin{definition}
    An \vocab{infinite path} consists of an infinite sequence of distinct points
    $x_0, x_1, \ldots$ such that, for every $n$, $x_n$ is connected to $x_{n+1}$,
    i.e.~the edge $\{x_n, x_{n+1}\}$ is open.
\end{definition}
Let $C_\infty \coloneqq \{\omega \mid \text{an infinite path exists}\}$.
\begin{exercise}
    Show that changing the presence / absence of finitely many edges
    does not change the existence of an infinite path.
    Therefore $C_\infty$ is an element of the tail $\sigma$-algebra,
    and hence $\bP(C_\infty) \in \{0,1\}$ by \yaref{kolmogorov01}.
\end{exercise}
Clearly, $\bP(C_\infty)$ is non-decreasing in $p$.
For $d = 2$ it is known that $p = \frac{1}{2}$ is the critical value.
For $d > 2$ the exact critical value is not known.
% TODO: more in the notes

We'll get back to percolation later.

\section{Characteristic Functions, Weak Convergence and the Central Limit Theorem}
% Characteristic functions are also known as the \vocab{Fourier transform}.
% Weak convergence is also known as \vocab{convergence in distribution} / \vocab{convergence in law}.

So far we have dealt with the average behaviour,
\[
    \frac{\overbrace{X_1 + \ldots + X_n}^{\text{i.i.d.}}}{n} \to \bE(X_1).
\]
We now want to understand fluctuations around the average behaviour, i.e.
\[
    X_1 + \ldots + X_n - n \cdot \bE(X_1).
\]
% TODO improve
The question is what happens on scales other than $n$.
An example is
\begin{equation}
    \frac{X_1 + \ldots + X_n - n \bE(X_1)}{\sqrt{n}} \xrightarrow{n \to \infty} \cN(0, \Var(X_1)).
    \label{eqn:lec09ast}
\end{equation}
Why is $\sqrt{n}$ the right order?
A hand-wavy argument: Suppose $X_1, X_2,\ldots$ are i.i.d.~with $X_1 \sim \cN(0,1)$.
The mean of the l.h.s.~is $0$ and for the variance we get
\begin{IEEEeqnarray*}{rCl}
    \Var\left(\frac{X_1 + \ldots + X_n - n \bE(X_1)}{\sqrt{n}}\right) &=& \Var\left( \frac{X_1+ \ldots + X_n}{\sqrt{n}} \right)\\
    &=& \frac{1}{n} \left( \Var(X_1) + \ldots + \Var(X_n) \right) = 1.
\end{IEEEeqnarray*}
For the r.h.s.~we also get a mean of $0$ and a variance of $1$.
So $\sqrt{n}$ is the only scaling for which both sides of \eqref{eqn:lec09ast}
can have the same non-degenerate variance.

To make \eqref{eqn:lec09ast} precise, we need another notion of convergence.
This will be the weakest notion of convergence, hence it is called \vocab{weak convergence}.
This notion of convergence will be defined in terms of characteristic functions,
i.e.~Fourier transforms.

\subsection{Convolutions${}^\dagger$}

\begin{definition}+[Convolution]
    Let $\mu$ and $\nu$ be probability measures on $\R^d$.
    Then the \vocab{convolution} of $\mu$ and $\nu$, $\mu \ast \nu$,
    is the probability measure on $\R^d$ defined by
    \[
        (\mu \ast \nu)(A) = \int_{\R^d} \int_{\R^d} \One_A(x + y) \mu(\dif x) \nu(\dif y).
    \]
\end{definition}
\begin{fact}
    If $\mu$ and $\nu$ have Lebesgue densities $f_\mu$ and $f_\nu$,
    then the convolution has Lebesgue density
    \[
        f_{\mu \ast \nu}(x) = \int_{\R^d} f_\mu(x - t) f_\nu(t) \lambda^d(\dif t).
    \]
\end{fact}
\begin{fact}+[Exercise 6.1]
    If $X_1,X_2,\ldots$ are independent with distributions $X_1 \sim \mu_1$, $X_2 \sim \mu_2, \ldots$,
    then $X_1 + \ldots + X_n$ has distribution
    \[
        \mu_1 \ast \mu_2 \ast \ldots \ast \mu_n.
    \]
\end{fact}
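As a quick illustration of the definition:
for Dirac measures one gets $\delta_a \ast \delta_b = \delta_{a+b}$,
and more generally $(\delta_a \ast \nu)(A) = \nu(A - a)$,
i.e.~convolving with $\delta_a$ shifts the measure by $a$.
Similarly, if $\mu = \nu = (1-p) \delta_0 + p \delta_1$ is the law of a coin flip, then
\[
    \mu \ast \nu = (1-p)^2 \delta_0 + 2p(1-p) \delta_1 + p^2 \delta_2,
\]
which is the binomial distribution $\mathrm{Bin}(2,p)$,
in agreement with the fact above:
the sum of two independent $\mathrm{Ber}(p)$-distributed random variables is $\mathrm{Bin}(2,p)$-distributed.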
\todo{TODO}

\subsection{Characteristic Functions and Fourier Transform}

\begin{definition}
    \label{def:characteristicfunction}
    Consider $(\R, \cB(\R), \bP)$.
    The \vocab{characteristic function} of $\bP$ is defined as
    \begin{IEEEeqnarray*}{rCl}
        \phi_{\bP}: \R &\longrightarrow & \C \\
        t &\longmapsto & \int_{\R} e^{\i t x} \bP(\dif x).
    \end{IEEEeqnarray*}
\end{definition}
\begin{abuse}
    $\phi_\bP(t)$ will often be abbreviated as $\phi(t)$.
\end{abuse}
We have
\[
    \phi(t) = \int_{\R} \cos(tx) \bP(\dif x) + \i \int_{\R} \sin(tx) \bP(\dif x).
\]
\begin{itemize}
    \item Since $|e^{\i t x}| \le 1$, the function $\phi(\cdot)$ is always well-defined.
    \item We have $\phi(0) = 1$.
    \item $|\phi(t)| \le \int_{\R} |e^{\i t x}| \bP(\dif x) = 1$.
\end{itemize}

\begin{fact}+
    Let $X$, $Y$ be independent random variables and $a,b \in \R$. Then
    \begin{itemize}
        \item $\phi_{a X + b}(t) = e^{\i t b} \phi_X(a t)$,
        \item $\phi_{X + Y}(t) = \phi_X(t) \cdot \phi_Y(t)$.
    \end{itemize}
\end{fact}
\begin{proof}
    We have
    \begin{IEEEeqnarray*}{rCl}
        \phi_{a X + b}(t) &=& \bE[e^{\i t (aX + b)}]\\
        &=& e^{\i t b} \bE[e^{\i (t a) X}]\\
        &=& e^{\i t b} \phi_X(a t).
    \end{IEEEeqnarray*}
    Furthermore, using the independence of $X$ and $Y$,
    \begin{IEEEeqnarray*}{rCl}
        \phi_{X + Y}(t) &=& \bE[e^{\i t (X + Y)}]\\
        &=& \bE[e^{\i t X} e^{\i t Y}]\\
        &=& \bE[e^{\i t X}] \bE[e^{\i t Y}]\\
        &=& \phi_X(t) \phi_Y(t).
    \end{IEEEeqnarray*}
\end{proof}

\begin{remark}
    Suppose $(\Omega, \cF, \bP)$ is an arbitrary probability space
    and $X: (\Omega, \cF) \to (\R, \cB(\R))$ is a random variable.
    Then we can define
    \[
        \phi_X(t) \coloneqq \bE[e^{\i t X}] = \int e^{\i t X(\omega)} \bP(\dif \omega) = \int_{\R} e^{\i t x} \mu(\dif x) = \phi_\mu(t),
    \]
    where $\mu = \bP \circ X^{-1}$ is the distribution of $X$.
\end{remark}
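To illustrate the definition and the product rule from the fact above,
consider $X \sim \mathrm{Ber}(p)$, i.e.~$\bP[X = 1] = p = 1 - \bP[X = 0]$.
Then
\[
    \phi_X(t) = \bE[e^{\i t X}] = (1-p) + p e^{\i t}.
\]
If $X_1, \ldots, X_n$ are i.i.d.~$\mathrm{Ber}(p)$-distributed, it follows that
\[
    \phi_{X_1 + \ldots + X_n}(t) = \left((1-p) + p e^{\i t}\right)^n,
\]
which is the characteristic function of $\mathrm{Bin}(n,p)$.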
\begin{theorem}[Inversion formula]
    % thm1
    \yalabel{Inversion Formula}{Inversion Formula}{inversionformula}
    Let $(\R, \cB(\R), \bP)$ be a probability space.
    Let $F$ be the distribution function of $\bP$
    (i.e.~$F(x) = \bP((-\infty, x])$ for all $x \in \R$).
    Then for every $a < b$ we have
    \begin{eqnarray}
        \frac{F(b) + F(b-)}{2} - \frac{F(a) + F(a-)}{2} = \lim_{T \to \infty} \frac{1}{2 \pi} \int_{-T}^T \frac{e^{-\i t b} - e^{- \i t a}}{- \i t} \phi(t) \dif t,
        \label{invf}
    \end{eqnarray}
    where $F(b-)$ denotes the left limit of $F$ at $b$ (similarly $F(a-)$).
\end{theorem}
% TODO!
We will prove this later.

\begin{theorem}[Uniqueness theorem]
    % thm2
    \yalabel{Uniqueness Theorem}{Uniqueness}{charfuncuniqueness}
    Let $\bP$ and $\Q$ be two probability measures on $(\R, \cB(\R))$.
    Then $\phi_\bP = \phi_\Q \implies \bP = \Q$.

    Therefore, probability measures are uniquely determined by their characteristic functions.
    Moreover, \eqref{invf} gives a representation of $\bP$ (via $F$) in terms of $\phi$.
\end{theorem}
\begin{refproof}{charfuncuniqueness}
    Assume that we have already shown the \yaref{inversionformula}.
    Suppose that $F$ and $G$ are the distribution functions of $\bP$ and $\Q$.
    Let $a, b \in \R$ with $a < b$ and assume that $a$ and $b$ are continuity points of both $F$ and $G$.
    By the \yaref{inversionformula} we have
    \begin{equation}
        F(b) - F(a) = G(b) - G(a).
        \label{eq:charfuncuniquefg}
    \end{equation}
    Since $F$ and $G$ are monotonic, each has at most countably many discontinuity points,
    so \yaref{eq:charfuncuniquefg} holds for all $a < b$ outside a countable set.
    Fix such a $b$ and take $a_n$ outside this countable set with $a_n \ssearrow -\infty$.
    Then \yaref{eq:charfuncuniquefg} gives $F(b) - F(a_n) = G(b) - G(a_n)$,
    and letting $n \to \infty$, since $F(a_n) \to 0$ and $G(a_n) \to 0$,
    we obtain $F(b) = G(b)$ for all $b$ outside the countable set.
    Since $F$ and $G$ are right-continuous, it follows that $F = G$.
\end{refproof}
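As a typical application of \yaref{charfuncuniqueness},
recall the standard formula $\phi_{\cN(\mu, \sigma^2)}(t) = e^{\i t \mu - \frac{\sigma^2 t^2}{2}}$
(which we do not derive here).
If $X \sim \cN(\mu_1, \sigma_1^2)$ and $Y \sim \cN(\mu_2, \sigma_2^2)$ are independent, then
\[
    \phi_{X+Y}(t) = \phi_X(t) \phi_Y(t) = e^{\i t (\mu_1 + \mu_2) - \frac{(\sigma_1^2 + \sigma_2^2) t^2}{2}},
\]
which is the characteristic function of $\cN(\mu_1 + \mu_2, \sigma_1^2 + \sigma_2^2)$.
By \yaref{charfuncuniqueness}, it follows that $X + Y \sim \cN(\mu_1 + \mu_2, \sigma_1^2 + \sigma_2^2)$.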