\documentclass[12pt,twoside]{article}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Packages
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amscd}
\usepackage{bbm}
\usepackage[hmarginratio=1:1]{geometry}
\usepackage{titlesec}
\usepackage{mathrsfs}
\usepackage{enumerate}
\usepackage{graphicx}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Modified Title
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Typeset a homework title block.
%   #1 course, #2 assignment title, #3 author, #4 date.
% The trailing "%" signs terminate source lines so the unguarded line ends
% inside the macro body do not leak spurious spaces into the output.
\newcommand{\makehwtitle}[4]{%
\begin{center}%
\Large
\textbf{#1 \,-- \:#2}%
\\
#3 \\
#4
\normalsize
\end{center}%
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Problem Headings
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Problem/part numbering.  The optional [ProblemNumber] argument makes
% SubProblemNumber reset to zero every time ProblemNumber is stepped.
\newcounter{ProblemNumber}
\newcounter{SubProblemNumber}[ProblemNumber]
% Problems display as arabic numerals (1, 2, ...); parts as letters (a, b, ...).
\renewcommand{\theProblemNumber}{\arabic{ProblemNumber}}
\renewcommand{\theSubProblemNumber}{\alph{SubProblemNumber}}
% Begin a problem with heading #1: steps the problem counter (which also
% resets the sub-problem counter) and draws a horizontal rule under the
% heading.  Trailing "%" signs suppress spurious spaces from line ends.
\newcommand{\problem}[1]{%
\stepcounter{ProblemNumber}%
\subsection*{#1}%
\hrulefill
\vspace{12pt}%
}
% Begin the next problem with an automatic "Problem N" heading.  The
% counter is stepped inside \problem before #1 is typeset, so
% \theProblemNumber expands to the new value.  Trailing "%" signs
% suppress spurious spaces from line ends.
\newcommand{\nproblem}{%
\problem{Problem \theProblemNumber}%
}
% Unnumbered solution-style heading with title #1.
\newcommand{\soln}[1]{\subsection*{#1}}
% Standard "Solution" heading for single-part problems.
\newcommand{\solution}{\soln{Solution}}
% Heading for the next lettered part of the current problem.
% NOTE: this clobbers the article class's sectioning command \part;
% that is intentional here, since this document never uses sectioning
% parts.  Trailing "%" signs suppress spurious spaces from line ends.
\renewcommand{\part}{%
\stepcounter{SubProblemNumber}%
\soln{Part (\theSubProblemNumber)}%
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% New Commands
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Attach an equation number to one line of a starred alignment, e.g.
% ... \numberthis\label{eq:foo} at the end of an align* row.
\newcommand\numberthis{\addtocounter{equation}{1}\tag{\theequation}}
% Ordinary derivative d^{#1}#2 / d{#3}^{#1}; the optional argument #1 is
% the order.  \mathrm (rather than \textrm) keeps the upright "d"
% correctly sized when it appears in sub/superscripts or script styles.
\newcommand{\deriv}[3][]{\ensuremath{\frac{\mathrm{d}^{#1}#2}{\mathrm{d}{#3}^{#1}}}}
% Partial derivative; same interface as \deriv.
\newcommand{\pderiv}[3][]{\ensuremath{\frac{\partial^{#1}#2}{\partial{#3}^{#1}}}}
% Upright differential with a preceding thin space, e.g. \int f(x)\d x.
% NOTE: this clobbers the standard \d under-dot accent (unused here).
\renewcommand{\d}{\ensuremath{\,\mathrm{d}}}
% End-of-proof box (amssymb's \Box).
\newcommand{\qed}{\ensuremath{\Box}}
% Absolute value with automatically sized bars.
\newcommand{\absval}[1]{\ensuremath{\left|#1\right|}}
\begin{document}
\begin{center}
\huge\textbf{Homework 5} \normalsize \\ Due Friday, August 4, 2017
\end{center}
\problem{Problem 1 (10 points)}
Suppose that we have a species whose population $N(t)$ is governed by a simple linear birth-death process. That is, in a small interval of time $\Delta t$, the probability that any one individual gives birth is approximately $\beta\Delta t$ and the probability that any one individual dies is approximately $\delta\Delta t$. Under these assumptions, the probabilities $P_n(t)$ satisfy
\begin{equation} \label{eq:simple-bd}
\deriv{P_n}{t} = \beta(n - 1)P_{n-1} - (\beta + \delta)nP_{n} + \delta(n + 1)P_{n+1},
\end{equation}
where
\begin{equation}
P_n(t) = \textrm{Pr}\left[N(t) = n \:\vert\: N(0) = N_0\right]
\end{equation}
is the probability that $N(t) = n$ given the initial population $N_0$. We will always assume that $P_{-1} = 0$.
In class, we solved this problem using a probability generating function and then used that function to find the expected value
\begin{equation} \label{eq:exval}
\langle N(t) \rangle = \sum_{n=0}^{\infty}nP_n(t).
\end{equation}
If all we need is the expected value, then there is a much simpler way to find it.
\begin{enumerate}[(a)]
\item Use equation \eqref{eq:simple-bd} to show that
\begin{equation} \label{eq:exp-growth}
\deriv{\langle N(t) \rangle}{t} = (\beta - \delta)\langle N(t) \rangle.
\end{equation}
\item Solve equation \eqref{eq:exp-growth} for $\langle N(t) \rangle$. (You should not have any arbitrary constants of integration in your solution.)
\end{enumerate}
\part
We have
\begin{align*}
\deriv{\langle N(t) \rangle}{t} &= \deriv{}{t}\sum_{n=0}^{\infty}nP_n(t) \\
&= \sum_{n=0}^{\infty}n\deriv{P_n}{t} \\
&= \sum_{n=0}^{\infty}n\left[\beta(n - 1)P_{n-1}(t) - (\beta + \delta)nP_n(t) + \delta(n + 1)P_{n+1}(t)\right] \\
&= \sum_{n=0}^{\infty}\beta n(n - 1)P_{n-1}(t) - \sum_{n=0}^{\infty}(\beta + \delta)n^2P_n(t) + \sum_{n=0}^{\infty}\delta n(n + 1)P_{n+1}(t).
\end{align*}
If we let $i = n - 1$ and $j = n + 1$, we have
\begin{align*}
\deriv{\langle N(t) \rangle}{t} &= \sum_{i=-1}^{\infty}\beta(i + 1)iP_i(t) - \sum_{n=0}^{\infty}(\beta + \delta)n^2P_n(t) + \sum_{j=1}^{\infty}\delta(j - 1)jP_j(t) \\
&= \sum_{i=0}^{\infty}\beta(i^2 + i)P_i(t) - \sum_{n=0}^{\infty}(\beta + \delta)n^2P_n(t) + \sum_{j=0}^{\infty}\delta(j^2 - j)P_j(t).
\end{align*}
Since $i$, $j$ and $n$ are just dummy variables, we can recombine these sums into
\begin{align*}
\deriv{\langle N(t) \rangle}{t} &= \sum_{n=0}^{\infty}\left[\beta(n^2 + n) - (\beta + \delta)n^2 + \delta(n^2 - n)\right]P_n(t) \\
&= \sum_{n=0}^{\infty}(\beta - \delta)nP_n(t) \\
&= (\beta - \delta)\sum_{n=0}^{\infty}nP_n(t) \\
&= (\beta - \delta)\langle N(t) \rangle,
\end{align*}
as desired.
\part
We have
\begin{equation}
\deriv{}{t}\langle N(t) \rangle = (\beta - \delta)\langle N(t) \rangle.
\end{equation}
This equation is separable, and we have solved it many times in this class. We get
\begin{equation*}
\langle N(t) \rangle = Ce^{(\beta - \delta)t}.
\end{equation*}
From the definition of $P_n(t)$, we know that $P_{N_0}(0) = 1$ and $P_n(0) = 0$ for all $n \neq N_0$. Therefore,
\begin{equation}
\langle N(0) \rangle = \sum_{n=0}^{\infty}nP_n(0) = N_0.
\end{equation}
(You don't really need to prove this. We know that $N(0) = N_0$, so it should be clear that $\langle N(0) \rangle = N_0$ as well.)
We can use this initial condition to find $C$, so we have
\begin{equation*}
\langle N(t) \rangle = N_0e^{(\beta - \delta) t}.
\end{equation*}
\problem{Problem 2 (10 points)}
To incorporate density dependence into birth-death processes, we let the birth and death rates depend on the population $N$. That is, we replace the constant values $\beta$ and $\delta$ with $\beta_n$ and $\delta_n$, which vary with $n$. One particularly simple version of density dependence arises if we assume that $\beta_n$ decreases linearly with $n$, while $\delta_n$ is constant. That is, let
\begin{equation}
\beta_n = \beta - \frac{n}{K} \:\textrm{ and }\: \delta_n = \delta,
\end{equation}
where $\beta$ and $\delta$ are constants and $K$ is a constant positive integer.
The governing equations are otherwise identical to those of problem 1, so we find that
\begin{equation} \label{eq:log-bd}
\deriv{P_n}{t} = \beta_{n-1}(n - 1)P_{n-1} - (\beta_n + \delta_n)nP_n + \delta_{n+1}(n+1)P_{n+1},
\end{equation}
where
\begin{equation}
P_n(t) = \textrm{Pr}\left[N(t) = n \:\vert\: N(0) = N_0\right]
\end{equation}
for all $n \geq 0$ and $P_{-1}(t) = 0$.
\begin{enumerate}[(a)]
\item Using a similar technique to that in problem 1, show that
\begin{equation}
\deriv{\langle N(t)\rangle}{t} = \left(\beta - \delta\right)\langle N(t) \rangle - \frac{\langle N(t)^2\rangle}{K}.
\end{equation}
\item Using the fact that the variance of $N(t)$ is
\begin{equation} \label{eq:var}
\textrm{Var}\left[N(t)\right] = \langle N(t)^2 \rangle - \langle N(t) \rangle^2,
\end{equation}
show that
\begin{equation} \label{eq:log-ode}
\deriv{\langle N(t) \rangle}{t} = \left(\beta - \delta\right)\langle N(t) \rangle - \frac{\langle N(t) \rangle^2}{K} - \frac{\textrm{Var}\left[N(t)\right]}{K}.
\end{equation}
\item If this were a deterministic problem, then $\textrm{Var}\left[N(t)\right]$ would be zero. Find the two equilibria of \eqref{eq:log-ode} assuming that $\textrm{Var}\left[N(t)\right] = 0$. (You may assume that $\beta > \delta > 0$.)
\item Since this really isn't a deterministic problem, we should not expect the variance to be zero. Instead, we will assume that $\textrm{Var}\left[N(t)\right] \approx \sigma^2$ is a small positive constant. Find the two equilibria of \eqref{eq:log-ode} under this assumption. (You may still assume that $\beta > \delta > 0$.)
\end{enumerate}
\part
As above, we have that
\begin{align*}
\deriv{\langle N(t) \rangle}{t} &= \deriv{}{t}\sum_{n=0}^{\infty}nP_n(t) \\
&= \sum_{n=0}^{\infty}n\deriv{P_n}{t} \\
&= \sum_{n=0}^{\infty}n\left[\beta_{n-1}(n-1)P_{n-1}(t) - (\beta_n + \delta_n)nP_n(t) + \delta_{n+1}(n+1)P_{n+1}(t)\right] \\
&= \sum_{n=0}^{\infty}\beta_{n-1}n(n-1)P_{n-1}(t) - \sum_{n=0}^{\infty}(\beta_n + \delta_n)n^2P_n(t) + \sum_{n=0}^{\infty}\delta_{n+1}n(n+1)P_{n+1}(t).
\end{align*}
As before, we can (temporarily) let $i = n - 1$ and $j = n + 1$. This gives
\begin{align*}
\deriv{\langle N(t) \rangle}{t} &= \sum_{i=-1}^{\infty}\beta_i(i + 1)iP_i(t) - \sum_{n=0}^{\infty}(\beta_n + \delta_n)n^2P_n(t) + \sum_{j=1}^{\infty}\delta_j(j - 1)jP_j(t) \\
&= \sum_{i=0}^{\infty}\beta_i(i^2 + i)P_i(t) - \sum_{n=0}^{\infty}(\beta_n + \delta_n)n^2P_n(t) + \sum_{j=0}^{\infty}\delta_j(j^2 - j)P_j(t) \\
&= \sum_{n=0}^{\infty}\left[\beta_n(n^2 + n) - (\beta_n + \delta_n)n^2 + \delta_n(n^2 - n)\right]P_n(t) \\
&= \sum_{n=0}^{\infty}(\beta_n - \delta_n)nP_n(t).
\end{align*}
This is the same result as in the previous problem, but here $\beta_n$ is not constant. We therefore have
\begin{align*}
\deriv{\langle N(t) \rangle}{t} &= \sum_{n=0}^{\infty}\left(\beta - \frac{n}{K} - \delta\right)nP_n(t) \\
&= \sum_{n=0}^{\infty}\left(\beta - \delta\right)nP_n(t) - \sum_{n=0}^{\infty}\frac{n^2}{K}P_n(t) \\
&= (\beta - \delta)\sum_{n=0}^{\infty}nP_n(t) - \frac{1}{K}\sum_{n=0}^{\infty}n^2P_n(t) \\
&= (\beta - \delta)\langle N(t) \rangle - \frac{\langle N(t)^2 \rangle}{K},
\end{align*}
as desired.
\part
Rearranging equation \eqref{eq:var}, we obtain
\begin{equation*}
\langle N(t)^2 \rangle = \textrm{Var}\left[N(t)\right] + \langle N(t) \rangle^2.
\end{equation*}
Substituting this into our answer from the previous section, we get
\begin{equation*}
\deriv{\langle N(t) \rangle}{t} = (\beta - \delta)\langle N(t) \rangle - \frac{\langle N(t) \rangle^2}{K} - \frac{\textrm{Var}\left[N(t)\right]}{K},
\end{equation*}
as desired.
\part
If we assume that $\textrm{Var}\left[N(t)\right] = 0$, then equation \eqref{eq:log-ode} becomes
\begin{equation*}
\deriv{\langle N(t) \rangle}{t} = (\beta - \delta)\langle N(t) \rangle - \frac{\langle N(t) \rangle^2}{K}.
\end{equation*}
(Note that this is just the logistic equation from our second homework.) Equilibria of this equation are constant solutions $\langle N(t) \rangle = N^*$. This means
\begin{equation*}
0 = (\beta - \delta)N^* - \frac{(N^*)^2}{K} = N^*\left((\beta - \delta) - \frac{N^*}{K}\right).
\end{equation*}
Thus either $N^* = 0$ or $N^* = K(\beta - \delta)$. (In other words, if the equation were actually deterministic and $\beta > \delta > 0$, then the system would have an equilibrium population at $0$, which corresponds to extinction, and at the positive population $K(\beta - \delta)$, which corresponds to an extant population.)
\part
If we instead assume that $\textrm{Var}\left[N(t)\right] = \sigma^2$ is some small positive constant, then we have
\begin{equation*}
0 = (\beta - \delta)N^* - \frac{(N^*)^2}{K} - \frac{\sigma^2}{K},
\end{equation*}
so
\begin{equation*}
(N^*)^2 - K(\beta - \delta)N^* + \sigma^2 = 0.
\end{equation*}
This is a quadratic equation, so we find that
\begin{equation*}
N^* = \frac{1}{2}\left(K(\beta - \delta) \pm \sqrt{K^2(\beta - \delta)^2 - 4\sigma^2}\right).
\end{equation*}
Note that $\sqrt{K^2(\beta - \delta)^2 - 4\sigma^2}$ is less than $K(\beta - \delta)$, but only slightly. This means that one of the equilibria is almost zero (slightly positive, assuming $\beta > \delta$) and the other is slightly smaller than $K(\beta - \delta)$ (and positive, if $\beta > \delta$). It turns out that the variance really is almost constant when $N(t)$ is close to the larger equilibrium, but not when $N(t)$ is close to zero, so there really is a value of $N$ slightly below $K(\beta - \delta)$ that is almost a fixed point.
\problem{Problem 3 (10 points)}
Suppose that we have a birth-death process governed by
\begin{equation} \label{eq:nonlin-bd}
\deriv{P_n}{t} = \beta_{n-1}(n - 1)P_{n-1} - (\beta_n + \delta_n)nP_n + \delta_{n+1}(n+1)P_{n+1},
\end{equation}
where
\begin{equation}
P_n(t) = \textrm{Pr}\left[N(t) = n \:\vert\: N(0) = N_0\right]
\end{equation}
for all $n\geq 0$ and $P_{-1}(t) = 0$. Instead of the formulas for $\beta_n$ and $\delta_n$ from problem 2, suppose that $\beta_n$ and $\delta_n$ are arbitrary functions of $n$ with $\delta_n \neq 0$ for all $n \geq 0$.
Remember that $\pi = \left(\pi_0, \pi_1, \pi_2, \dotsc\right)$ is a stationary state if the constant functions $P_n(t) = \pi_n$ are solutions of \eqref{eq:nonlin-bd}. (Note, in particular, that $\sum_{n=0}^{\infty}\pi_n = 1$.)
Show that the only stationary solution to \eqref{eq:nonlin-bd} is given by $\pi_0 = 1$ and $\pi_n = 0$ for all $n > 0$. This means that the only stationary state of any birth-death process corresponds to extinction.
\solution
If we assume that $P_n(t) = \pi_n$ is constant for all $n \geq 0$, then we also know that $P_n'(t) = 0$ for all $n \geq 0$. In particular, for $n = 0$ this means that
\begin{align*}
0 &= \beta_{-1}\cdot(-1)P_{-1}(t) - (\beta_0 + \delta_0)\cdot 0P_0(t) + \delta_1\cdot 1P_1(t) \\
&= \delta_1\pi_1.
\end{align*}
(Here, we have used the fact that $P_{-1}(t) \equiv 0$ by definition.) Since $\delta_1 \neq 0$, we therefore know that $\pi_1 = 0$. Similarly, when $n = 1$ we get
\begin{align*}
0 &= \beta_0\cdot 0\cdot P_{0}(t) - (\beta_1 + \delta_1)\cdot 1P_1(t) + \delta_2\cdot 2P_2(t) \\
&= -(\beta_1 + \delta_1)\pi_1 + 2\delta_2\pi_2 \\
&= 2\delta_2\pi_2.
\end{align*}
As before, we know that $\delta_2 \neq 0$, so we must have $\pi_2 = 0$. We can continue this argument inductively: suppose that $\pi_k = \pi_{k-1} = 0$ for some $k\geq 2$. We therefore have
\begin{align*}
0 &= \beta_{k-1}(k-1)P_{k-1}(t) - (\beta_k + \delta_k)kP_k(t) + \delta_{k+1}(k+1)P_{k+1}(t) \\
&= (k - 1)\beta_{k-1}\pi_{k-1} - k(\beta_k + \delta_k)\pi_k + (k + 1)\delta_{k+1}\pi_{k+1} \\
&= (k + 1)\delta_{k+1}\pi_{k+1}.
\end{align*}
Since $\delta_{k+1} \neq 0$, we must have $\pi_{k+1} = 0$ as well. Since we already proved that $\pi_1 = \pi_2 = 0$, this completes the proof that $\pi_n = 0$ for all $n \geq 1$. To find $\pi_0$, note that
\begin{equation*}
\sum_{n=0}^{\infty}\pi_n = 1.
\end{equation*}
All the terms in this sum are zero except for $\pi_0$, so we must have $\pi_0 = 1$, as desired.
\problem{Problem 4 (10 points)}
The result of problem 3 suggests that stationary states are probably too restrictive a notion for birth-death processes. The problem is that $N(t) = 0$ is an absorbing state, and so the only two long term possibilities are that the population dies out or grows to infinity. Many populations, however, seem to reach ``stable'' levels for a very long time before eventually succumbing to extinction.
To make this new idea of stability rigorous, we will define the conditional probability
\begin{equation} \label{eq:qn}
q_n(t) = \textrm{Pr}\left[N(t) = n \:\vert\: N(0) = N_0 \textrm{ and } N(t) \neq 0\right] = \frac{P_n(t)}{1 - P_0(t)},
\end{equation}
for all $n > 0$. This is the probability that $N(t) = n$ under the assumption that it has not yet gone extinct. Use equations \eqref{eq:nonlin-bd} and \eqref{eq:qn} to show that
\begin{equation} \label{eq:q-ode}
\deriv{q_n}{t} = \beta_{n-1}(n - 1)q_{n - 1} - (\beta_n + \delta_n)nq_n + \delta_{n+1}(n + 1)q_{n+1} + \delta_1 q_1q_n,
\end{equation}
for all $n > 0$. (We are assuming that $q_0 = 0$.)
\textbf{Extra credit} (5 points): Equation \eqref{eq:q-ode} often does have constant solutions. If $q_n(t) = \pi_n$ is constant for every $n > 0$, then we call $\pi$ a \emph{quasistationary state} of the birth-death process. Unfortunately, \eqref{eq:q-ode} is nonlinear, so it is generally very difficult to solve for $\pi$ explicitly. However, there are several ways to approximate the quasistationary state numerically. For instance, you can
\begin{enumerate}
\item Guess a nonzero value for $\pi_1$.
\item Calculate $\pi_2$, $\pi_3$, $\pi_4$, etc. by repeatedly using \eqref{eq:q-ode} with $\d q_n/\d t = 0$. Stop at $\pi_N$, where $N$ is chosen so that $\pi_N$ is sufficiently small.
\item Find $Q = \sum_{n=1}^{N}\pi_n$ and replace each $\pi_n$ with $\pi_n/Q$ so that they all sum to 1.
\item If the new value of $\pi_1$ is substantially different from your previous guess, start again using the new $\pi_1$ as your starting guess.
\end{enumerate}
For extra credit, implement this algorithm to find the quasistationary state of the birth-death process from problem 2 with $\beta = 0.1$, $\delta = 0.02$ and $K = 100$. Make a plot of $\pi_n$ versus $n$. How does the peak of this plot compare to your solutions from parts c and d of problem 2? How big do you think the variance is?
\solution
Since $q_n(t) = P_n(t)/(1 - P_0(t))$, and since equation \eqref{eq:nonlin-bd} with $n = 0$ reduces to $P_0'(t) = \delta_1P_1(t)$, we have
\begin{align*}
\deriv{q_n}{t} &= \frac{P_n'(t)(1 - P_0(t)) + P_0'(t)P_n(t)}{(1 - P_0(t))^2} \\
&= \frac{P_n'(t)}{1 - P_0(t)} + \frac{P_0'(t)}{1 - P_0(t)}\cdot\frac{P_n(t)}{1 - P_0(t)} \\
&= \frac{\beta_{n-1}(n-1)P_{n-1}(t) - (\beta_n + \delta_n)nP_n(t) + \delta_{n+1}(n+1)P_{n+1}(t)}{1 - P_0(t)} \\
&\:\:\:\:\:\:\:\:+ \frac{\delta_1P_1(t)}{1 - P_0(t)}\cdot\frac{P_n(t)}{1 - P_0(t)} \\
&= \beta_{n-1}(n-1)\frac{P_{n-1}(t)}{1 - P_0(t)} - (\beta_n + \delta_n)n\frac{P_n(t)}{1 - P_0(t)} + \delta_{n+1}(n+1)\frac{P_{n+1}(t)}{1 - P_0(t)} \\
&\:\:\:\:\:\:\:\:+ \delta_1\frac{P_1(t)}{1 - P_0(t)}\cdot\frac{P_n(t)}{1 - P_0(t)} \\
&= \beta_{n-1}(n - 1)q_{n-1}(t) - (\beta_n + \delta_n)nq_n(t) + \delta_{n+1}(n+1)q_{n+1}(t) + \delta_1q_1(t)q_n(t),
\end{align*}
as desired.
If you implement the algorithm in the extra credit, you should obtain a graph like:
\begin{center}
\includegraphics[width=0.8\textwidth]{quasi_10}
\end{center}
Note that $K(\beta - \delta) = 8$, so the peak of the quasistationary distribution roughly corresponds to the equilibrium of the deterministic problem. This is probably a little easier to see if we choose a larger $K$. When $K = 1000$, we get the graph:
\begin{center}
\includegraphics[width=0.8\textwidth]{quasi_100}
\end{center}
Again, the peak roughly corresponds to $K(\beta - \delta) = 80$.
\end{document}