\documentclass[11pt]{article}
\input{math_macros}
\usepackage{amssymb,amsmath,amsthm}
\usepackage[margin=1in]{geometry}
\begin{document}
\begin{flushleft}
\fbox{
\begin{minipage}{\textwidth}
{\bf CS 174: Combinatorics and Discrete Probability} \hfill Fall 2012 \bigskip \\
\centering{\Large Homework 9} \medskip \\
\centering{Due: Thursday, November 8, 2012 by {\bf 9:30am}}
\end{minipage}
} \bigskip \\
\end{flushleft}
\noindent{\it \textbf{Instructions}}: {\it You should upload your homework
solutions on bspace. You are strongly encouraged to type out your solutions
using \LaTeX . You may also want to consider using the mathematical typing mode of
an office suite if you are not familiar with \LaTeX . If you must handwrite
your homeworks, please write clearly and legibly. We will not grade homeworks
that are unreadable. You are encouraged to work in groups of 2--4, but you {\bf
must} write solutions on your own. Please review the homework policy carefully
on the class homepage.} \medskip \\
\noindent {\bf Note}: You \emph{must} justify all your answers. In particular, you will get
no credit if you simply write the final answer without any explanation. \medskip
\\
\noindent {\bf Problem 1}. {\it (Exercise 7.12 from MU -- 6 points)} Let $X_n$ be the
sum of $n$ independent rolls of a fair die. Show that, for any $k \geq 2$,
\[ \lim_{n \to \infty} \Pr[X_n \text{ is divisible by } k] = \frac{1}{k}.
\] \medskip \\
\noindent {\bf Problem 2}. {\it (Exercise 7.13 from MU -- 6 points)} Consider a
finite Markov chain on $n$ states with stationary distribution $\bar{\pi}$ and
transition probabilities $P_{i, j}$. Imagine starting the chain at time $0$ and
running it for $m$ steps, obtaining the sequence of states $X_0, X_1, \ldots,
X_m$. Here $X_0$ is chosen according to distribution $\bar{\pi}$. Consider the
states in reverse order, $X_m, X_{m-1}, \ldots, X_0$.
%
\begin{enumerate}
\item[(a)] Argue that given $X_{k+1}$, the state $X_k$ is independent of
$X_{k+2}, X_{k+3}, \ldots, X_m$. Thus, the reverse sequence is Markovian.
\item[(b)] Argue that for the reverse sequence, the transition probabilities,
$Q_{i, j}$, are given by
\[ Q_{i, j} = \frac{\pi_j P_{j, i}}{\pi_i}. \]
\item[(c)] Prove that if the original Markov chain is time reversible, so that
$\pi_i P_{i, j} = \pi_j P_{j, i}$, then $Q_{i, j} = P_{i, j}$. That is, the
states follow the same transition probabilities whether viewed in forward or
reverse order.
\end{enumerate} \medskip
\noindent {\bf Problem 3}. {\it (Exercise 7.20 from MU -- 6 points)} We have
considered the gambler's ruin problem in the case where the game is fair.
Consider the case where the game is not fair; instead, the probability of losing
a dollar each game is $2/3$ and the probability of winning a dollar each game is
$1/3$. Suppose you start with $i$ dollars and finish either when you reach $n$
or lose it all. Let $W_t$ be the amount you have gained after $t$ rounds of
play.
%
\begin{enumerate}
\item[(a)] Show that $\E[2^{W_{t+1}}] = \E[2^{W_t}]$.
\item[(b)] Use part (a) to determine the probability of finishing with $0$
dollars and the probability of finishing with $n$ dollars when starting at
position $i$.
\end{enumerate} \medskip
\noindent {\bf Problem 4}. {\it (Exercise 7.22 from MU -- 6 points)} A cat and a
mouse take a random walk on a connected, undirected, non-bipartite graph $G$.
They start at the same time on different nodes, and each makes one transition at
each time step. The cat eats the mouse if they are ever at the same node at some
time step. Let $n$ and $m$ denote, respectively, the number of vertices and
edges of $G$. Show an upper bound of $O(m^2 n)$ on the expected time before the
cat eats the mouse. ({\it Hint}: Consider a Markov chain whose states are the
ordered pairs $(a, b)$, where $a$ is the position of the cat and $b$ is the
position of the mouse.) \medskip \\
\noindent {\bf Problem 5}. {\it (Exercise 7.24 from MU -- 6 points)} The
\emph{lollipop} graph on $n$ vertices is a clique on $n/2$ vertices connected to
a path on $n/2$ vertices. (See Figure~7.3 on p.~186 of the textbook.) The node
$u$ is part of both the clique and the path. Let $v$ denote the other end of
the path.
\begin{enumerate}
\item[(a)] Show that the expected covering time of a random walk starting at $v$
is $\Theta(n^2)$.
\item[(b)] Show that the expected covering time for a random walk starting at
$u$ is $\Theta(n^3)$.
\end{enumerate}
\end{document}